Compiler: LIR restructuring

Continuing the restructuring of the compiler.  In this installment,
all LIR references are moved from compiler_ir down to quick.  Further,
all Portable data is moved from compiler_ir down to portable.

In short, the great dumping ground of CompilationUnit has been
split into three smaller dumping grounds: MIRGraph, Codegen,
and MIRConverter.  As a result, most function signatures have been
altered to drop the CompilationUnit* pointer that was previously
passed around.  From here, subsequent CLs will repartition those
smaller dumping grounds into (hopefully) more coherent classes.
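
For orientation, the new frontend/backend contract is the small
abstract Backend class added in src/compiler/dex/backend.h.  A
rough sketch of how a target would plug in (FooBackend and
FooCodeGenerator are illustrative names only, not part of this CL):

    // Sketch of a target backend against the new interface.  The real
    // implementations are the Arm/Mips/X86 code generators wired up in
    // frontend.cc via factory calls like ArmCodeGenerator(cu, mir_graph).
    class FooBackend : public Backend {
      public:
        FooBackend(CompilationUnit* cu, MIRGraph* mir_graph)
            : cu_(cu), mir_graph_(mir_graph), compiled_method_(NULL) {}

        // Lowers MIR to target code; subsumes the MethodMIR2LIR /
        // AssembleLIR sequence that used to live inline in frontend.cc.
        virtual void Materialize() {
          // ... MIR->LIR conversion, register allocation, assembly ...
        }

        // Packages the result; subsumes the CompiledMethod construction
        // formerly done at the end of CompileMethod().
        virtual CompiledMethod* GetCompiledMethod() {
          return compiled_method_;
        }

      private:
        CompilationUnit* cu_;
        MIRGraph* mir_graph_;
        CompiledMethod* compiled_method_;
    };

    // The frontend then reduces to:
    //   cu->cg.reset(FooCodeGenerator(cu.get(), cu->mir_graph.get()));
    //   cu->cg->Materialize();
    //   CompiledMethod* result = cu->cg->GetCompiledMethod();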

Change-Id: I7195f7baecd81e87786a952e18bbce0b6ceeaac4
diff --git a/src/compiler/dex/backend.h b/src/compiler/dex/backend.h
new file mode 100644
index 0000000..804bc36
--- /dev/null
+++ b/src/compiler/dex/backend.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_BACKEND_H_
+#define ART_SRC_COMPILER_DEX_BACKEND_H_
+
+#include "compiled_method.h"
+
+namespace art {
+
+class Backend {
+
+  public:
+    virtual ~Backend() {}
+    virtual void Materialize() = 0;
+    virtual CompiledMethod* GetCompiledMethod() = 0;
+
+  protected:
+    Backend() {}
+
+};  // Class Backend
+
+}  // namespace art
+
+#endif // ART_SRC_COMPILER_DEX_BACKEND_H_
diff --git a/src/compiler/dex/compiler_internals.h b/src/compiler/dex/compiler_internals.h
index 71792e0..8ef8a71 100644
--- a/src/compiler/dex/compiler_internals.h
+++ b/src/compiler/dex/compiler_internals.h
@@ -24,7 +24,7 @@
 
 #include "base/logging.h"
 #include "class_linker.h"
-#include "quick/codegen.h"
+#include "quick/mir_to_lir.h"
 #include "compiler/driver/compiler_driver.h"
 #include "mir_graph.h"
 #include "compiler_ir.h"
diff --git a/src/compiler/dex/compiler_ir.h b/src/compiler/dex/compiler_ir.h
index efae1f5..d4bf3da 100644
--- a/src/compiler/dex/compiler_ir.h
+++ b/src/compiler/dex/compiler_ir.h
@@ -18,10 +18,9 @@
 #define ART_SRC_COMPILER_DEX_COMPILER_IR_H_
 
 #include <vector>
-
 #include <llvm/IR/Module.h>
-
-#include "compiler/dex/quick/codegen.h"
+#include "compiler/dex/quick/mir_to_lir.h"
+#include "backend.h"
 #include "compiler/driver/compiler_driver.h"
 #include "compiler/driver/dex_compilation_unit.h"
 #include "compiler/llvm/intrinsic_helper.h"
@@ -33,268 +32,16 @@
 
 namespace art {
 
-//TODO: replace these macros
-#define SLOW_FIELD_PATH (cu->enable_debug & (1 << kDebugSlowFieldPath))
-#define SLOW_INVOKE_PATH (cu->enable_debug & (1 << kDebugSlowInvokePath))
-#define SLOW_STRING_PATH (cu->enable_debug & (1 << kDebugSlowStringPath))
-#define SLOW_TYPE_PATH (cu->enable_debug & (1 << kDebugSlowTypePath))
-#define EXERCISE_SLOWEST_STRING_PATH (cu->enable_debug & \
-  (1 << kDebugSlowestStringPath))
-
-// Minimum field size to contain Dalvik v_reg number.
-#define VREG_NUM_WIDTH 16
-
 struct ArenaBitVector;
-struct LIR;
 class LLVMInfo;
 namespace llvm {
 class LlvmCompilationUnit;
 }  // namespace llvm
 
-struct PromotionMap {
-  RegLocationType core_location:3;
-  uint8_t core_reg;
-  RegLocationType fp_location:3;
-  uint8_t FpReg;
-  bool first_in_pair;
-};
-
-struct RegLocation {
-  RegLocationType location:3;
-  unsigned wide:1;
-  unsigned defined:1;   // Do we know the type?
-  unsigned is_const:1;  // Constant, value in mir_graph->constant_values[].
-  unsigned fp:1;        // Floating point?
-  unsigned core:1;      // Non-floating point?
-  unsigned ref:1;       // Something GC cares about.
-  unsigned high_word:1; // High word of pair?
-  unsigned home:1;      // Does this represent the home location?
-  uint8_t low_reg;      // First physical register.
-  uint8_t high_reg;     // 2nd physical register (if wide).
-  int32_t s_reg_low;    // SSA name for low Dalvik word.
-  int32_t orig_sreg;    // TODO: remove after Bitcode gen complete
-                        // and consolodate usage w/ s_reg_low.
-};
-
-struct CompilerTemp {
-  int s_reg;
-  ArenaBitVector* bv;
-};
-
-struct CallInfo {
-  int num_arg_words;    // Note: word count, not arg count.
-  RegLocation* args;    // One for each word of arguments.
-  RegLocation result;   // Eventual target of MOVE_RESULT.
-  int opt_flags;
-  InvokeType type;
-  uint32_t dex_idx;
-  uint32_t index;       // Method idx for invokes, type idx for FilledNewArray.
-  uintptr_t direct_code;
-  uintptr_t direct_method;
-  RegLocation target;    // Target of following move_result.
-  bool skip_this;
-  bool is_range;
-  int offset;            // Dalvik offset.
-};
-
- /*
- * Data structure tracking the mapping between a Dalvik register (pair) and a
- * native register (pair). The idea is to reuse the previously loaded value
- * if possible, otherwise to keep the value in a native register as long as
- * possible.
- */
-struct RegisterInfo {
-  int reg;                    // Reg number
-  bool in_use;                // Has it been allocated?
-  bool is_temp;               // Can allocate as temp?
-  bool pair;                  // Part of a register pair?
-  int partner;                // If pair, other reg of pair.
-  bool live;                  // Is there an associated SSA name?
-  bool dirty;                 // If live, is it dirty?
-  int s_reg;                  // Name of live value.
-  LIR *def_start;             // Starting inst in last def sequence.
-  LIR *def_end;               // Ending inst in last def sequence.
-};
-
-struct RegisterPool {
-  int num_core_regs;
-  RegisterInfo *core_regs;
-  int next_core_reg;
-  int num_fp_regs;
-  RegisterInfo *FPRegs;
-  int next_fp_reg;
-};
-
-#define INVALID_SREG (-1)
-#define INVALID_VREG (0xFFFFU)
-#define INVALID_REG (0xFF)
-#define INVALID_OFFSET (0xDEADF00FU)
-
-/* SSA encodings for special registers */
-#define SSA_METHOD_BASEREG (-2)
-/* First compiler temp basereg, grows smaller */
-#define SSA_CTEMP_BASEREG (SSA_METHOD_BASEREG - 1)
-
-/*
- * Some code patterns cause the generation of excessively large
- * methods - in particular initialization sequences.  There isn't much
- * benefit in optimizing these methods, and the cost can be very high.
- * We attempt to identify these cases, and avoid performing most dataflow
- * analysis.  Two thresholds are used - one for known initializers and one
- * for everything else.
- */
-#define MANY_BLOCKS_INITIALIZER 1000 /* Threshold for switching dataflow off */
-#define MANY_BLOCKS 4000 /* Non-initializer threshold */
-
-// Utility macros to traverse the LIR list.
-#define NEXT_LIR(lir) (lir->next)
-#define PREV_LIR(lir) (lir->prev)
-
-// Defines for alias_info (tracks Dalvik register references).
-#define DECODE_ALIAS_INFO_REG(X)        (X & 0xffff)
-#define DECODE_ALIAS_INFO_WIDE_FLAG     (0x80000000)
-#define DECODE_ALIAS_INFO_WIDE(X)       ((X & DECODE_ALIAS_INFO_WIDE_FLAG) ? 1 : 0)
-#define ENCODE_ALIAS_INFO(REG, ISWIDE)  (REG | (ISWIDE ? DECODE_ALIAS_INFO_WIDE_FLAG : 0))
-
-// Common resource macros.
-#define ENCODE_CCODE            (1ULL << kCCode)
-#define ENCODE_FP_STATUS        (1ULL << kFPStatus)
-
-// Abstract memory locations.
-#define ENCODE_DALVIK_REG       (1ULL << kDalvikReg)
-#define ENCODE_LITERAL          (1ULL << kLiteral)
-#define ENCODE_HEAP_REF         (1ULL << kHeapRef)
-#define ENCODE_MUST_NOT_ALIAS   (1ULL << kMustNotAlias)
-
-#define ENCODE_ALL              (~0ULL)
-#define ENCODE_MEM              (ENCODE_DALVIK_REG | ENCODE_LITERAL | \
-                                 ENCODE_HEAP_REF | ENCODE_MUST_NOT_ALIAS)
-
-#define is_pseudo_opcode(opcode) (static_cast<int>(opcode) < 0)
-
-struct LIR {
-  int offset;               // Offset of this instruction.
-  int dalvik_offset;        // Offset of Dalvik opcode.
-  LIR* next;
-  LIR* prev;
-  LIR* target;
-  int opcode;
-  int operands[5];          // [0..4] = [dest, src1, src2, extra, extra2].
-  struct {
-    bool is_nop:1;          // LIR is optimized away.
-    bool pcRelFixup:1;      // May need pc-relative fixup.
-    unsigned int size:5;    // Note: size is in bytes.
-    unsigned int unused:25;
-  } flags;
-  int alias_info;           // For Dalvik register & litpool disambiguation.
-  uint64_t use_mask;        // Resource mask for use.
-  uint64_t def_mask;        // Resource mask for def.
-};
-
-extern const char* extended_mir_op_names[kMirOpLast - kMirOpFirst];
-
-struct SSARepresentation;
-
-#define MIR_IGNORE_NULL_CHECK           (1 << kMIRIgnoreNullCheck)
-#define MIR_NULL_CHECK_ONLY             (1 << kMIRNullCheckOnly)
-#define MIR_IGNORE_RANGE_CHECK          (1 << kMIRIgnoreRangeCheck)
-#define MIR_RANGE_CHECK_ONLY            (1 << kMIRRangeCheckOnly)
-#define MIR_INLINED                     (1 << kMIRInlined)
-#define MIR_INLINED_PRED                (1 << kMIRInlinedPred)
-#define MIR_CALLEE                      (1 << kMIRCallee)
-#define MIR_IGNORE_SUSPEND_CHECK        (1 << kMIRIgnoreSuspendCheck)
-#define MIR_DUP                         (1 << kMIRDup)
-
-struct Checkstats {
-  int null_checks;
-  int null_checks_eliminated;
-  int range_checks;
-  int range_checks_eliminated;
-};
-
-struct MIR {
-  DecodedInstruction dalvikInsn;
-  unsigned int width;
-  unsigned int offset;
-  int m_unit_index;               // From which method was this MIR included
-  MIR* prev;
-  MIR* next;
-  SSARepresentation* ssa_rep;
-  int optimization_flags;
-  union {
-    // Establish link between two halves of throwing instructions.
-    MIR* throw_insn;
-    // Saved opcode for NOP'd MIRs
-    Instruction::Code original_opcode;
-  } meta;
-};
-
-struct BasicBlockDataFlow {
-  ArenaBitVector* use_v;
-  ArenaBitVector* def_v;
-  ArenaBitVector* live_in_v;
-  ArenaBitVector* phi_v;
-  int* vreg_to_ssa_map;
-  ArenaBitVector* ending_null_check_v;
-};
-
-struct SSARepresentation {
-  int num_uses;
-  int* uses;
-  bool* fp_use;
-  int num_defs;
-  int* defs;
-  bool* fp_def;
-};
-
-struct BasicBlock {
-  int id;
-  int dfs_id;
-  bool visited;
-  bool hidden;
-  bool catch_entry;
-  bool explicit_throw;
-  bool conditional_branch;
-  bool terminated_by_return;        // Block ends with a Dalvik return opcode.
-  bool dominates_return;            // Is a member of return extended basic block.
-  uint16_t start_offset;
-  uint16_t nesting_depth;
-  BBType block_type;
-  MIR* first_mir_insn;
-  MIR* last_mir_insn;
-  BasicBlock* fall_through;
-  BasicBlock* taken;
-  BasicBlock* i_dom;                // Immediate dominator.
-  BasicBlockDataFlow* data_flow_info;
-  GrowableList* predecessors;
-  ArenaBitVector* dominators;
-  ArenaBitVector* i_dominated;      // Set nodes being immediately dominated.
-  ArenaBitVector* dom_frontier;     // Dominance frontier.
-  struct {                          // For one-to-many successors like.
-    BlockListType block_list_type;  // switch and exception handling.
-    GrowableList blocks;
-  } successor_block_list;
-};
-
-/*
- * The "blocks" field in "successor_block_list" points to an array of
- * elements with the type "SuccessorBlockInfo".
- * For catch blocks, key is type index for the exception.
- * For swtich blocks, key is the case value.
- */
-struct SuccessorBlockInfo {
-  BasicBlock* block;
-  int key;
-};
-
-struct LoopAnalysis;
-struct RegisterPool;
 struct ArenaMemBlock;
 struct Memstats;
 class MIRGraph;
-class Codegen;
-
-#define NOTVISITED (-1)
+class Mir2Lir;
 
 struct CompilationUnit {
   CompilationUnit()
@@ -311,54 +58,21 @@
       disable_opt(0),
       enable_debug(0),
       verbose(false),
-      gen_bitcode(false),
-      disable_dataflow(false),
+      compiler_backend(kNoBackend),
       instruction_set(kNone),
       num_dalvik_registers(0),
       insns(NULL),
       num_ins(0),
       num_outs(0),
       num_regs(0),
-      num_core_spills(0),
-      num_fp_spills(0),
       num_compiler_temps(0),
-      frame_size(0),
-      core_spill_mask(0),
-      fp_spill_mask(0),
-      attributes(0),
       compiler_flip_match(false),
       arena_head(NULL),
       current_arena(NULL),
       num_arena_blocks(0),
       mstats(NULL),
-      checkstats(NULL),
       mir_graph(NULL),
-      cg(NULL),
-      live_sreg(0),
-      llvm_info(NULL),
-      context(NULL),
-      module(NULL),
-      func(NULL),
-      intrinsic_helper(NULL),
-      irb(NULL),
-      placeholder_bb(NULL),
-      entry_bb(NULL),
-      entryTarget_bb(NULL),
-      temp_name(0),
-      first_lir_insn(NULL),
-      last_lir_insn(NULL),
-      literal_list(NULL),
-      method_literal_list(NULL),
-      code_literal_list(NULL),
-      data_offset(0),
-      total_size(0),
-      reg_pool(NULL),
-      reg_location(NULL),
-      promotion_map(NULL),
-      method_sreg(0),
-      block_label_list(NULL),
-      current_dalvik_offset(0)
- {}
+      cg(NULL) {}
   /*
    * Fields needed/generated by common frontend and generally used throughout
    * the compiler.
@@ -375,130 +89,35 @@
   const char* shorty;                  // compiling method's shorty.
   uint32_t disable_opt;                // opt_control_vector flags.
   uint32_t enable_debug;               // debugControlVector flags.
-  std::vector<uint8_t> code_buffer;
   bool verbose;
-  std::vector<uint32_t> combined_mapping_table;
-  std::vector<uint32_t> core_vmap_table;
-  std::vector<uint32_t> fp_vmap_table;
-  std::vector<uint8_t> native_gc_map;
-  bool gen_bitcode;
-  bool disable_dataflow;               // Skip dataflow analysis if possible
+  CompilerBackend compiler_backend;
   InstructionSet instruction_set;
 
-  // CLEANUP: much of this info available elsewhere.  Go to the original source?
+  // TODO: much of this info is available elsewhere.  Go to the original source?
   int num_dalvik_registers;        // method->registers_size.
   const uint16_t* insns;
-  /*
-   * Frame layout details.
-   * NOTE: for debug support it will be necessary to add a structure
-   * to map the Dalvik virtual registers to the promoted registers.
-   * NOTE: "num" fields are in 4-byte words, "Size" and "Offset" in bytes.
-   */
   int num_ins;
   int num_outs;
   int num_regs;            // Unlike num_dalvik_registers, does not include ins.
-  int num_core_spills;
-  int num_fp_spills;
+
+  // TODO: may want to move this to MIRGraph.
   int num_compiler_temps;
-  int frame_size;
-  unsigned int core_spill_mask;
-  unsigned int fp_spill_mask;
-  unsigned int attributes;
+
   // If non-empty, apply optimizer/debug flags only to matching methods.
   std::string compiler_method_match;
   // Flips sense of compiler_method_match - apply flags if doesn't match.
   bool compiler_flip_match;
+
+  // TODO: move memory management to mir_graph, or just switch to using standard containers.
   ArenaMemBlock* arena_head;
   ArenaMemBlock* current_arena;
   int num_arena_blocks;
   Memstats* mstats;
-  Checkstats* checkstats;
+
   UniquePtr<MIRGraph> mir_graph;   // MIR container.
-  UniquePtr<Codegen> cg;           // Target-specific codegen.
-  /*
-   * Sanity checking for the register temp tracking.  The same ssa
-   * name should never be associated with one temp register per
-   * instruction compilation.
-   */
-  int live_sreg;
-
-  // Fields for Portable
-  llvm::LlvmCompilationUnit* llvm_compilation_unit;
- /*
-  * Fields needed by GBC creation.  Candidates for moving to a new MIR to
-  * llvm bitcode class.
-  */
-  LLVMInfo* llvm_info;
-  std::string symbol;
-  ::llvm::LLVMContext* context;
-  ::llvm::Module* module;
-  ::llvm::Function* func;
-  art::llvm::IntrinsicHelper* intrinsic_helper;
-  art::llvm::IRBuilder* irb;
-  ::llvm::BasicBlock* placeholder_bb;
-  ::llvm::BasicBlock* entry_bb;
-  ::llvm::BasicBlock* entryTarget_bb;
-
-  std::string bitcode_filename;
-  GrowableList llvm_values;
-  int32_t temp_name;
-  SafeMap<int32_t, ::llvm::BasicBlock*> id_to_block_map;  // block id -> llvm bb.
-
- /*
-  * Fields needed by the Quick backend.  Candidates for moving to a new
-  * QuickBackend class.
-  */
-  LIR* first_lir_insn;
-  LIR* last_lir_insn;
-  LIR* literal_list;                   // Constants.
-  LIR* method_literal_list;            // Method literals requiring patching.
-  LIR* code_literal_list;              // Code literals requiring patching.
-  int data_offset;                     // starting offset of literal pool.
-  int total_size;                      // header + code size.
-  RegisterPool* reg_pool;
-  // Map SSA names to location.
-  RegLocation* reg_location;
-  // Keep track of Dalvik v_reg to physical register mappings.
-  PromotionMap* promotion_map;
-  // SSA name for Method*.
-  int method_sreg;
-  RegLocation method_loc;          // Describes location of method*.
-  GrowableList throw_launchpads;
-  GrowableList suspend_launchpads;
-  GrowableList intrinsic_launchpads;
-  GrowableList compiler_temps;
-  LIR* block_label_list;
-  /*
-   * TODO: The code generation utilities don't have a built-in
-   * mechanism to propagate the original Dalvik opcode address to the
-   * associated generated instructions.  For the trace compiler, this wasn't
-   * necessary because the interpreter handled all throws and debugging
-   * requests.  For now we'll handle this by placing the Dalvik offset
-   * in the CompilationUnit struct before codegen for each instruction.
-   * The low-level LIR creation utilites will pull it from here.  Rework this.
-   */
-  int current_dalvik_offset;
-  GrowableList switch_tables;
-  GrowableList fill_array_data;
-  SafeMap<unsigned int, unsigned int> block_id_map; // Block collapse lookup cache.
-  SafeMap<unsigned int, LIR*> boundary_map; // boundary lookup cache.
-  /*
-   * Holds mapping from native PC to dex PC for safepoints where we may deoptimize.
-   * Native PC is on the return address of the safepointed operation.  Dex PC is for
-   * the instruction being executed at the safepoint.
-   */
-  std::vector<uint32_t> pc2dexMappingTable;
-  /*
-   * Holds mapping from Dex PC to native PC for catch entry points.  Native PC and Dex PC
-   * immediately preceed the instruction.
-   */
-  std::vector<uint32_t> dex2pcMappingTable;
+  UniquePtr<Backend> cg;           // Target-specific codegen.
 };
 
-// TODO: move this
-int SRegToVReg(const CompilationUnit* cu, int ssa_reg);
-
-
 }  // namespace art
 
 #endif // ART_SRC_COMPILER_DEX_COMPILER_IR_H_
diff --git a/src/compiler/dex/compiler_utility.cc b/src/compiler/dex/compiler_utility.cc
index 82a156d..71c9713 100644
--- a/src/compiler/dex/compiler_utility.cc
+++ b/src/compiler/dex/compiler_utility.cc
@@ -19,23 +19,6 @@
 
 namespace art {
 
-const char* extended_mir_op_names[kMirOpLast - kMirOpFirst] = {
-  "Phi",
-  "Copy",
-  "FusedCmplFloat",
-  "FusedCmpgFloat",
-  "FusedCmplDouble",
-  "FusedCmpgDouble",
-  "FusedCmpLong",
-  "Nop",
-  "OpNullCheck",
-  "OpRangeCheck",
-  "OpDivZeroCheck",
-  "Check1",
-  "Check2",
-  "Select",
-};
-
 #ifdef WITH_MEMSTATS
 struct Memstats {
   uint32_t alloc_stats[kNumAllocKinds];
@@ -301,9 +284,6 @@
     LOG(INFO) << "MEMUSAGE: " << total << " : "
         << PrettyMethod(cu->method_idx, *cu->dex_file);
     LOG(INFO) << "insns_size: " << cu->code_item->insns_size_in_code_units_;
-    if (cu->disable_dataflow) {
-        LOG(INFO) << " ** Dataflow disabled ** ";
-    }
     LOG(INFO) << "===== Overall allocations";
     for (int i = 0; i < kNumAllocKinds; i++) {
         LOG(INFO) << alloc_names[i] << std::setw(10) <<
@@ -328,43 +308,6 @@
 }
 #endif
 
-/* Debug Utility - dump a compilation unit */
-void DumpCompilationUnit(CompilationUnit* cu)
-{
-  BasicBlock* bb;
-  const char* block_type_names[] = {
-    "Entry Block",
-    "Code Block",
-    "Exit Block",
-    "Exception Handling",
-    "Catch Block"
-  };
-
-  LOG(INFO) << "Compiling " << PrettyMethod(cu->method_idx, *cu->dex_file);
-  LOG(INFO) << cu->insns << " insns";
-  LOG(INFO) << cu->mir_graph->GetNumBlocks() << " blocks in total";
-  GrowableListIterator iterator = cu->mir_graph->GetBasicBlockIterator();
-
-  while (true) {
-    bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iterator));
-    if (bb == NULL) break;
-    LOG(INFO) << StringPrintf("Block %d (%s) (insn %04x - %04x%s)",
-        bb->id,
-        block_type_names[bb->block_type],
-        bb->start_offset,
-        bb->last_mir_insn ? bb->last_mir_insn->offset : bb->start_offset,
-        bb->last_mir_insn ? "" : " empty");
-    if (bb->taken) {
-      LOG(INFO) << "  Taken branch: block " << bb->taken->id
-                << "(0x" << std::hex << bb->taken->start_offset << ")";
-    }
-    if (bb->fall_through) {
-      LOG(INFO) << "  Fallthrough : block " << bb->fall_through->id
-                << " (0x" << std::hex << bb->fall_through->start_offset << ")";
-    }
-  }
-}
-
 static uint32_t check_masks[32] = {
   0x00000001, 0x00000002, 0x00000004, 0x00000008, 0x00000010,
   0x00000020, 0x00000040, 0x00000080, 0x00000100, 0x00000200,
@@ -669,34 +612,6 @@
   }
 }
 
-void GetBlockName(BasicBlock* bb, char* name)
-{
-  switch (bb->block_type) {
-    case kEntryBlock:
-      snprintf(name, BLOCK_NAME_LEN, "entry_%d", bb->id);
-      break;
-    case kExitBlock:
-      snprintf(name, BLOCK_NAME_LEN, "exit_%d", bb->id);
-      break;
-    case kDalvikByteCode:
-      snprintf(name, BLOCK_NAME_LEN, "block%04x_%d", bb->start_offset, bb->id);
-      break;
-    case kExceptionHandling:
-      snprintf(name, BLOCK_NAME_LEN, "exception%04x_%d", bb->start_offset,
-               bb->id);
-      break;
-    default:
-      snprintf(name, BLOCK_NAME_LEN, "_%d", bb->id);
-      break;
-  }
-}
-
-const char* GetShortyFromTargetIdx(CompilationUnit *cu, int target_idx)
-{
-  const DexFile::MethodId& method_id = cu->dex_file->GetMethodId(target_idx);
-  return cu->dex_file->GetShorty(method_id.proto_idx_);
-}
-
 /* Allocate a new basic block */
 BasicBlock* NewMemBB(CompilationUnit* cu, BBType block_type, int block_id)
 {
@@ -708,270 +623,8 @@
   CompilerInitGrowableList(cu, bb->predecessors,
                       (block_type == kExitBlock) ? 2048 : 2,
                       kListPredecessors);
-  cu->block_id_map.Put(block_id, block_id);
+  cu->mir_graph->block_id_map_.Put(block_id, block_id);
   return bb;
 }
 
-/* Insert an MIR instruction to the end of a basic block */
-void AppendMIR(BasicBlock* bb, MIR* mir)
-{
-  if (bb->first_mir_insn == NULL) {
-    DCHECK(bb->last_mir_insn == NULL);
-    bb->last_mir_insn = bb->first_mir_insn = mir;
-    mir->prev = mir->next = NULL;
-  } else {
-    bb->last_mir_insn->next = mir;
-    mir->prev = bb->last_mir_insn;
-    mir->next = NULL;
-    bb->last_mir_insn = mir;
-  }
-}
-
-/* Insert an MIR instruction to the head of a basic block */
-void PrependMIR(BasicBlock* bb, MIR* mir)
-{
-  if (bb->first_mir_insn == NULL) {
-    DCHECK(bb->last_mir_insn == NULL);
-    bb->last_mir_insn = bb->first_mir_insn = mir;
-    mir->prev = mir->next = NULL;
-  } else {
-    bb->first_mir_insn->prev = mir;
-    mir->next = bb->first_mir_insn;
-    mir->prev = NULL;
-    bb->first_mir_insn = mir;
-  }
-}
-
-/* Insert a MIR instruction after the specified MIR */
-void InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir)
-{
-  new_mir->prev = current_mir;
-  new_mir->next = current_mir->next;
-  current_mir->next = new_mir;
-
-  if (new_mir->next) {
-    /* Is not the last MIR in the block */
-    new_mir->next->prev = new_mir;
-  } else {
-    /* Is the last MIR in the block */
-    bb->last_mir_insn = new_mir;
-  }
-}
-
-/*
- * Append an LIR instruction to the LIR list maintained by a compilation
- * unit
- */
-void AppendLIR(CompilationUnit *cu, LIR* lir)
-{
-  if (cu->first_lir_insn == NULL) {
-    DCHECK(cu->last_lir_insn == NULL);
-     cu->last_lir_insn = cu->first_lir_insn = lir;
-    lir->prev = lir->next = NULL;
-  } else {
-    cu->last_lir_insn->next = lir;
-    lir->prev = cu->last_lir_insn;
-    lir->next = NULL;
-    cu->last_lir_insn = lir;
-  }
-}
-
-/*
- * Insert an LIR instruction before the current instruction, which cannot be the
- * first instruction.
- *
- * prev_lir <-> new_lir <-> current_lir
- */
-void InsertLIRBefore(LIR* current_lir, LIR* new_lir)
-{
-  DCHECK(current_lir->prev != NULL);
-  LIR *prev_lir = current_lir->prev;
-
-  prev_lir->next = new_lir;
-  new_lir->prev = prev_lir;
-  new_lir->next = current_lir;
-  current_lir->prev = new_lir;
-}
-
-/*
- * Insert an LIR instruction after the current instruction, which cannot be the
- * first instruction.
- *
- * current_lir -> new_lir -> old_next
- */
-void InsertLIRAfter(LIR* current_lir, LIR* new_lir)
-{
-  new_lir->prev = current_lir;
-  new_lir->next = current_lir->next;
-  current_lir->next = new_lir;
-  new_lir->next->prev = new_lir;
-}
-
-/* Turn method name into a legal Linux file name */
-void ReplaceSpecialChars(std::string& str)
-{
-  static const struct { const char before; const char after; } match[] =
-      {{'/','-'}, {';','#'}, {' ','#'}, {'$','+'},
-       {'(','@'}, {')','@'}, {'<','='}, {'>','='}};
-  for (unsigned int i = 0; i < sizeof(match)/sizeof(match[0]); i++) {
-    std::replace(str.begin(), str.end(), match[i].before, match[i].after);
-  }
-}
-
-std::string GetSSAName(const CompilationUnit* cu, int ssa_reg)
-{
-  return StringPrintf("v%d_%d", cu->mir_graph->SRegToVReg(ssa_reg), cu->mir_graph->GetSSASubscript(ssa_reg));
-}
-
-// Similar to GetSSAName, but if ssa name represents an immediate show that as well.
-std::string GetSSANameWithConst(const CompilationUnit* cu, int ssa_reg, bool singles_only)
-{
-  if (cu->reg_location == NULL) {
-    // Pre-SSA - just use the standard name
-    return GetSSAName(cu, ssa_reg);
-  }
-  if (cu->mir_graph->IsConst(cu->reg_location[ssa_reg])) {
-    if (!singles_only && cu->reg_location[ssa_reg].wide) {
-      return StringPrintf("v%d_%d#0x%llx", cu->mir_graph->SRegToVReg(ssa_reg),
-                          cu->mir_graph->GetSSASubscript(ssa_reg),
-                          cu->mir_graph->ConstantValueWide(cu->reg_location[ssa_reg]));
-    } else {
-      return StringPrintf("v%d_%d#0x%x", cu->mir_graph->SRegToVReg(ssa_reg),
-                          cu->mir_graph->GetSSASubscript(ssa_reg),
-                          cu->mir_graph->ConstantValue(cu->reg_location[ssa_reg]));
-    }
-  } else {
-    return StringPrintf("v%d_%d", cu->mir_graph->SRegToVReg(ssa_reg),
-                        cu->mir_graph->GetSSASubscript(ssa_reg));
-  }
-}
-
-char* GetDalvikDisassembly(CompilationUnit* cu, const MIR* mir)
-{
-  DecodedInstruction insn = mir->dalvikInsn;
-  std::string str;
-  int flags = 0;
-  int opcode = insn.opcode;
-  char* ret;
-  bool nop = false;
-  SSARepresentation* ssa_rep = mir->ssa_rep;
-  Instruction::Format dalvik_format = Instruction::k10x;  // Default to no-operand format
-  int defs = (ssa_rep != NULL) ? ssa_rep->num_defs : 0;
-  int uses = (ssa_rep != NULL) ? ssa_rep->num_uses : 0;
-
-  // Handle special cases.
-  if ((opcode == kMirOpCheck) || (opcode == kMirOpCheckPart2)) {
-    str.append(extended_mir_op_names[opcode - kMirOpFirst]);
-    str.append(": ");
-    // Recover the original Dex instruction
-    insn = mir->meta.throw_insn->dalvikInsn;
-    ssa_rep = mir->meta.throw_insn->ssa_rep;
-    defs = ssa_rep->num_defs;
-    uses = ssa_rep->num_uses;
-    opcode = insn.opcode;
-  } else if (opcode == kMirOpNop) {
-    str.append("[");
-    insn.opcode = mir->meta.original_opcode;
-    opcode = mir->meta.original_opcode;
-    nop = true;
-  }
-
-  if (opcode >= kMirOpFirst) {
-    str.append(extended_mir_op_names[opcode - kMirOpFirst]);
-  } else {
-    dalvik_format = Instruction::FormatOf(insn.opcode);
-    flags = Instruction::FlagsOf(insn.opcode);
-    str.append(Instruction::Name(insn.opcode));
-  }
-
-  if (opcode == kMirOpPhi) {
-    int* incoming = reinterpret_cast<int*>(insn.vB);
-    str.append(StringPrintf(" %s = (%s",
-               GetSSANameWithConst(cu, ssa_rep->defs[0], true).c_str(),
-               GetSSANameWithConst(cu, ssa_rep->uses[0], true).c_str()));
-    str.append(StringPrintf(":%d",incoming[0]));
-    int i;
-    for (i = 1; i < uses; i++) {
-      str.append(StringPrintf(", %s:%d",
-                              GetSSANameWithConst(cu, ssa_rep->uses[i], true).c_str(),
-                              incoming[i]));
-    }
-    str.append(")");
-  } else if ((flags & Instruction::kBranch) != 0) {
-    // For branches, decode the instructions to print out the branch targets.
-    int offset = 0;
-    switch (dalvik_format) {
-      case Instruction::k21t:
-        str.append(StringPrintf(" %s,", GetSSANameWithConst(cu, ssa_rep->uses[0], false).c_str()));
-        offset = insn.vB;
-        break;
-      case Instruction::k22t:
-        str.append(StringPrintf(" %s, %s,", GetSSANameWithConst(cu, ssa_rep->uses[0], false).c_str(),
-                   GetSSANameWithConst(cu, ssa_rep->uses[1], false).c_str()));
-        offset = insn.vC;
-        break;
-      case Instruction::k10t:
-      case Instruction::k20t:
-      case Instruction::k30t:
-        offset = insn.vA;
-        break;
-      default:
-        LOG(FATAL) << "Unexpected branch format " << dalvik_format << " from " << insn.opcode;
-    }
-    str.append(StringPrintf(" 0x%x (%c%x)", mir->offset + offset,
-                            offset > 0 ? '+' : '-', offset > 0 ? offset : -offset));
-  } else {
-    // For invokes-style formats, treat wide regs as a pair of singles
-    bool show_singles = ((dalvik_format == Instruction::k35c) ||
-                         (dalvik_format == Instruction::k3rc));
-    if (defs != 0) {
-      str.append(StringPrintf(" %s", GetSSANameWithConst(cu, ssa_rep->defs[0], false).c_str()));
-      if (uses != 0) {
-        str.append(", ");
-      }
-    }
-    for (int i = 0; i < uses; i++) {
-      str.append(
-          StringPrintf(" %s", GetSSANameWithConst(cu, ssa_rep->uses[i], show_singles).c_str()));
-      if (!show_singles && (cu->reg_location != NULL) && cu->reg_location[i].wide) {
-        // For the listing, skip the high sreg.
-        i++;
-      }
-      if (i != (uses -1)) {
-        str.append(",");
-      }
-    }
-    switch (dalvik_format) {
-      case Instruction::k11n: // Add one immediate from vB
-      case Instruction::k21s:
-      case Instruction::k31i:
-      case Instruction::k21h:
-        str.append(StringPrintf(", #%d", insn.vB));
-        break;
-      case Instruction::k51l: // Add one wide immediate
-        str.append(StringPrintf(", #%lld", insn.vB_wide));
-        break;
-      case Instruction::k21c: // One register, one string/type/method index
-      case Instruction::k31c:
-        str.append(StringPrintf(", index #%d", insn.vB));
-        break;
-      case Instruction::k22c: // Two registers, one string/type/method index
-        str.append(StringPrintf(", index #%d", insn.vC));
-        break;
-      case Instruction::k22s: // Add one immediate from vC
-      case Instruction::k22b:
-        str.append(StringPrintf(", #%d", insn.vC));
-        break;
-      default:
-        ; // Nothing left to print
-      }
-  }
-  if (nop) {
-    str.append("]--optimized away");
-  }
-  int length = str.length() + 1;
-  ret = static_cast<char*>(NewMem(cu, length, false, kAllocDFInfo));
-  strncpy(ret, str.c_str(), length);
-  return ret;
-}
 }  // namespace art
diff --git a/src/compiler/dex/compiler_utility.h b/src/compiler/dex/compiler_utility.h
index 4201f5d..b2dcd44 100644
--- a/src/compiler/dex/compiler_utility.h
+++ b/src/compiler/dex/compiler_utility.h
@@ -84,8 +84,6 @@
   kNumBitMapKinds
 };
 
-// Allocate the initial memory block for arena-based allocation.
-bool HeapInit(CompilationUnit* cu);
 
 // Uncomment to collect memory usage statistics.
 //#define WITH_MEMSTATS
@@ -97,10 +95,6 @@
   char ptr[0];
 };
 
-void* NewMem(CompilationUnit* cu, size_t size, bool zero, oat_alloc_kind kind);
-
-void ArenaReset(CompilationUnit *cu);
-
 struct GrowableList {
   GrowableList() : num_allocated(0), num_used(0), elem_list(NULL) {
   }
@@ -146,11 +140,13 @@
 // Forward declarations
 struct BasicBlock;
 struct CompilationUnit;
-struct LIR;
-struct RegLocation;
-struct MIR;
 enum BBType;
 
+// Allocate the initial memory block for arena-based allocation.
+bool HeapInit(CompilationUnit* cu);
+void* NewMem(CompilationUnit* cu, size_t size, bool zero, oat_alloc_kind kind);
+BasicBlock* NewMemBB(CompilationUnit* cu, BBType block_type, int block_id);
+void ArenaReset(CompilationUnit *cu);
 void CompilerInitGrowableList(CompilationUnit* cu, GrowableList* g_list,
                               size_t init_length, oat_list_kind kind = kListMisc);
 void ReallocGrowableList(CompilationUnit* cu, GrowableList* g_list, size_t new_length);
@@ -177,25 +173,15 @@
 bool CompareBitVectors(const ArenaBitVector* src1, const ArenaBitVector* src2);
 bool TestBitVectors(const ArenaBitVector* src1, const ArenaBitVector* src2);
 int CountSetBits(const ArenaBitVector* p_bits);
-void DumpLIRInsn(CompilationUnit* cu, LIR* lir, unsigned char* base_addr);
-void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
 void DumpBlockBitVector(const GrowableList* blocks, char* msg, const ArenaBitVector* bv,
                         int length);
-void GetBlockName(BasicBlock* bb, char* name);
-const char* GetShortyFromTargetIdx(CompilationUnit*, int);
 void DumpMemStats(CompilationUnit* cu);
-void DumpCompilationUnit(CompilationUnit* cu);
-BasicBlock* NewMemBB(CompilationUnit* cu, BBType block_type, int block_id);
-void AppendMIR(BasicBlock* bb, MIR* mir);
-void PrependMIR(BasicBlock* bb, MIR* mir);
-void InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir);
-void AppendLIR(CompilationUnit *cu, LIR* lir);
-void InsertLIRBefore(LIR* current_lir, LIR* new_lir);
-void InsertLIRAfter(LIR* current_lir, LIR* new_lir);
+
 void ReplaceSpecialChars(std::string& str);
-char* GetDalvikDisassembly(CompilationUnit* cu, const MIR* mir);
 std::string GetSSAName(const CompilationUnit* cu, int ssa_reg);
 std::string GetSSANameWithConst(const CompilationUnit* cu, int ssa_reg, bool singles_only);
+void GetBlockName(BasicBlock* bb, char* name);
+const char* GetShortyFromTargetIdx(CompilationUnit*, int);
 
 }  // namespace art
 
diff --git a/src/compiler/dex/frontend.cc b/src/compiler/dex/frontend.cc
index 049d662..e99c196 100644
--- a/src/compiler/dex/frontend.cc
+++ b/src/compiler/dex/frontend.cc
@@ -21,13 +21,12 @@
 #include "dataflow_iterator.h"
 #if defined(ART_USE_PORTABLE_COMPILER)
 #include "compiler/llvm/llvm_compilation_unit.h"
+#include "compiler/dex/portable/mir_to_gbc.h"
 #endif
 #include "leb128.h"
 #include "mirror/object.h"
 #include "runtime.h"
-#include "quick/codegen_util.h"
-#include "portable/mir_to_gbc.h"
-#include "quick/mir_to_lir.h"
+#include "backend.h"
 
 namespace {
 #if !defined(ART_USE_PORTABLE_COMPILER)
@@ -116,9 +115,6 @@
 {
   VLOG(compiler) << "Compiling " << PrettyMethod(method_idx, dex_file) << "...";
 
-  // FIXME: now we detect this in MIRGraph.
-  SpecialCaseHandler special_case = kNoHandler;
-
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   UniquePtr<CompilationUnit> cu(new CompilationUnit);
 
@@ -129,17 +125,12 @@
   cu->compiler_driver = &compiler;
   cu->class_linker = class_linker;
   cu->instruction_set = compiler.GetInstructionSet();
+  cu->compiler_backend = compiler_backend;
   DCHECK((cu->instruction_set == kThumb2) ||
          (cu->instruction_set == kX86) ||
          (cu->instruction_set == kMips));
 
-  cu->gen_bitcode = (compiler_backend == kPortable);
 
-#if defined(ART_USE_PORTABLE_COMPILER)
-  cu->llvm_compilation_unit = llvm_compilation_unit;
-  cu->llvm_info = llvm_compilation_unit->GetQuickContext();
-  cu->symbol = llvm_compilation_unit->GetDexCompilationUnit()->GetSymbol();
-#endif
   /* Adjust this value accordingly once inlining is performed */
   cu->num_dalvik_registers = code_item->registers_size_;
   // TODO: set this from command line
@@ -155,9 +146,14 @@
         (cu->enable_debug & (1 << kDebugVerbose));
   }
 
-  // If debug build, always verify bitcode.
-  if (kIsDebugBuild && cu->gen_bitcode) {
-    cu->enable_debug |= (1 << kDebugVerifyBitcode);
+  /*
+   * TODO: rework handling of optimization and debug flags.  Should we split out
+   * MIR and backend flags?  Need command-line setting as well.
+   */
+
+  if (compiler_backend == kPortable) {
+    // Fused long branches not currently useful in bitcode.
+    cu->disable_opt |= (1 << kBranchFusing);
   }
 
   if (cu->instruction_set == kMips) {
@@ -175,9 +171,6 @@
         (1 << kPromoteCompilerTemps));
   }
 
-  /* Assume leaf */
-  cu->attributes = METHOD_IS_LEAF;
-
   cu->mir_graph.reset(new MIRGraph(cu.get()));
 
   /* Gathering opcode stats? */
@@ -192,10 +185,6 @@
   /* Do a code layout pass */
   cu->mir_graph->CodeLayout();
 
-  if (cu->enable_debug & (1 << kDebugVerifyDataflow)) {
-    cu->mir_graph->VerifyDataflow();
-  }
-
   /* Perform SSA transformation for the whole method */
   cu->mir_graph->SSATransformation();
 
@@ -218,123 +207,42 @@
     cu->mir_graph->DumpCheckStats();
   }
 
+  if (kCompilerDebugFlags & (1 << kDebugCountOpcodes)) {
+    cu->mir_graph->ShowOpcodeStats();
+  }
+
   /* Set up regLocation[] array to describe values - one for each ssa_name. */
   cu->mir_graph->BuildRegLocations();
 
+  CompiledMethod* result = NULL;
+
 #if defined(ART_USE_PORTABLE_COMPILER)
-  /* Go the LLVM path? */
-  if (cu->gen_bitcode) {
-    // MIR->Bitcode
-    MethodMIR2Bitcode(cu.get());
-    if (compiler_backend == kPortable) {
-      // all done
-      ArenaReset(cu.get());
-      return NULL;
-    }
+  if (compiler_backend == kPortable) {
+    cu->cg.reset(PortableCodeGenerator(cu.get(), cu->mir_graph.get(), llvm_compilation_unit));
   } else
 #endif
   {
     switch (compiler.GetInstructionSet()) {
       case kThumb2:
-        InitArmCodegen(cu.get()); break;
+        cu->cg.reset(ArmCodeGenerator(cu.get(), cu->mir_graph.get())); break;
       case kMips:
-        InitMipsCodegen(cu.get()); break;
+        cu->cg.reset(MipsCodeGenerator(cu.get(), cu->mir_graph.get())); break;
       case kX86:
-        InitX86Codegen(cu.get()); break;
+        cu->cg.reset(X86CodeGenerator(cu.get(), cu->mir_graph.get())); break;
       default:
         LOG(FATAL) << "Unexpected instruction set: " << compiler.GetInstructionSet();
     }
-
-// ** MOVE ALL OF THIS TO Codegen.materialize()
-
-  /* Initialize the switch_tables list */                       // TO CODEGEN
-  CompilerInitGrowableList(cu.get(), &cu->switch_tables, 4,
-                      kListSwitchTables);
-
-  /* Intialize the fill_array_data list */                     // TO CODEGEN
-  CompilerInitGrowableList(cu.get(), &cu->fill_array_data, 4,
-                      kListFillArrayData);
-
-  /* Intialize the throw_launchpads list, estimate size based on insns_size */ // TO CODEGEN
-  CompilerInitGrowableList(cu.get(), &cu->throw_launchpads, code_item->insns_size_in_code_units_,
-                      kListThrowLaunchPads);
-
-  /* Intialize the instrinsic_launchpads list */  // TO_CODEGEN
-  CompilerInitGrowableList(cu.get(), &cu->intrinsic_launchpads, 4,
-                      kListMisc);
-
-
-  /* Intialize the suspend_launchpads list */ // TO_CODEGEN
-  CompilerInitGrowableList(cu.get(), &cu->suspend_launchpads, 2048,
-                      kListSuspendLaunchPads);
-
-    // TODO: Push these to codegen
-    cu.get()->cg->CompilerInitializeRegAlloc(cu.get());  // Needs to happen after SSA naming
-
-    /* Allocate Registers using simple local allocation scheme */
-    cu.get()->cg->SimpleRegAlloc(cu.get());
-
-    if (special_case != kNoHandler) {
-      /*
-       * Custom codegen for special cases.  If for any reason the
-       * special codegen doesn't succeed, cu->first_lir_insn will
-       * set to NULL;
-       */
-      SpecialMIR2LIR(cu.get(), special_case);
-    }
-
-    /* Convert MIR to LIR, etc. */
-    if (cu->first_lir_insn == NULL) {
-      MethodMIR2LIR(cu.get());
-    }
   }
 
-  /* Method is not empty */
-  if (cu->first_lir_insn) {
+  cu->cg->Materialize();
 
-    // mark the targets of switch statement case labels
-    ProcessSwitchTables(cu.get());
+  result = cu->cg->GetCompiledMethod();
 
-    /* Convert LIR into machine code. */
-    AssembleLIR(cu.get());
-
-    if (cu->verbose) {
-      CodegenDump(cu.get());
-    }
-
-  }
-
-  if (kCompilerDebugFlags & (1 << kDebugCountOpcodes)) {
-    cu->mir_graph->ShowOpcodeStats();
-  }
-
-  // Combine vmap tables - core regs, then fp regs - into vmap_table
-  std::vector<uint16_t> vmap_table;
-  // Core regs may have been inserted out of order - sort first
-  std::sort(cu->core_vmap_table.begin(), cu->core_vmap_table.end());
-  for (size_t i = 0 ; i < cu->core_vmap_table.size(); i++) {
-    // Copy, stripping out the phys register sort key
-    vmap_table.push_back(~(-1 << VREG_NUM_WIDTH) & cu->core_vmap_table[i]);
-  }
-  // If we have a frame, push a marker to take place of lr
-  if (cu->frame_size > 0) {
-    vmap_table.push_back(INVALID_VREG);
+  if (result) {
+    VLOG(compiler) << "Compiled " << PrettyMethod(method_idx, dex_file);
   } else {
-    DCHECK_EQ(__builtin_popcount(cu->core_spill_mask), 0);
-    DCHECK_EQ(__builtin_popcount(cu->fp_spill_mask), 0);
+    VLOG(compiler) << "Deferred " << PrettyMethod(method_idx, dex_file);
   }
-  // Combine vmap tables - core regs, then fp regs. fp regs already sorted
-  for (uint32_t i = 0; i < cu->fp_vmap_table.size(); i++) {
-    vmap_table.push_back(cu->fp_vmap_table[i]);
-  }
-  CompiledMethod* result =
-      new CompiledMethod(cu->instruction_set, cu->code_buffer,
-                         cu->frame_size, cu->core_spill_mask, cu->fp_spill_mask,
-                         cu->combined_mapping_table, vmap_table, cu->native_gc_map);
-
-  VLOG(compiler) << "Compiled " << PrettyMethod(method_idx, dex_file)
-     << " (" << (cu->code_buffer.size() * sizeof(cu->code_buffer[0]))
-     << " bytes)";
 
 #ifdef WITH_MEMSTATS
   if (cu->enable_debug & (1 << kDebugShowMemoryUsage)) {
diff --git a/src/compiler/dex/frontend.h b/src/compiler/dex/frontend.h
index 874ee0b..2e62dc8 100644
--- a/src/compiler/dex/frontend.h
+++ b/src/compiler/dex/frontend.h
@@ -50,6 +50,7 @@
   kBBOpt,
   kMatch,
   kPromoteCompilerTemps,
+  kBranchFusing,
 };
 
 // Force code generation paths for testing.
@@ -73,14 +74,6 @@
   kDebugVerifyBitcode,
 };
 
-enum OatMethodAttributes {
-  kIsLeaf,            // Method is leaf.
-  kHasLoop,           // Method contains simple loop.
-};
-
-#define METHOD_IS_LEAF          (1 << kIsLeaf)
-#define METHOD_HAS_LOOP         (1 << kHasLoop)
-
 class LLVMInfo {
   public:
     LLVMInfo();
diff --git a/src/compiler/dex/mir_dataflow.cc b/src/compiler/dex/mir_dataflow.cc
index 0c767aa..5e6eb82 100644
--- a/src/compiler/dex/mir_dataflow.cc
+++ b/src/compiler/dex/mir_dataflow.cc
@@ -29,7 +29,7 @@
  * TODO - many optimization flags are incomplete - they will only limit the
  * scope of optimizations but will not cause mis-optimizations.
  */
-const int oat_data_flow_attributes[kMirOpLast] = {
+const int MIRGraph::oat_data_flow_attributes_[kMirOpLast] = {
   // 00 NOP
   DF_NOP,
 
@@ -883,7 +883,7 @@
       AllocBitVector(cu_, cu_->num_dalvik_registers, false, kBitMapLiveIn);
 
   for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
-    int df_attributes = oat_data_flow_attributes[mir->dalvikInsn.opcode];
+    int df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
     DecodedInstruction *d_insn = &mir->dalvikInsn;
 
     if (df_attributes & DF_HAS_USES) {
@@ -934,7 +934,7 @@
   SetNumSSARegs(ssa_reg + 1);
   InsertGrowableList(cu_, ssa_base_vregs_, v_reg);
   InsertGrowableList(cu_, ssa_subscripts_, subscript);
-  std::string ssa_name = GetSSAName(cu_, ssa_reg);
+  std::string ssa_name = GetSSAName(ssa_reg);
   char* name = static_cast<char*>(NewMem(cu_, ssa_name.length() + 1, false, kAllocDFInfo));
   strncpy(name, ssa_name.c_str(), ssa_name.length() + 1);
   InsertGrowableList(cu_, ssa_strings_, reinterpret_cast<uintptr_t>(name));
@@ -1005,7 +1005,7 @@
     mir->ssa_rep = static_cast<struct SSARepresentation *>(NewMem(cu_, sizeof(SSARepresentation),
                                                                  true, kAllocDFInfo));
 
-    int df_attributes = oat_data_flow_attributes[mir->dalvikInsn.opcode];
+    int df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
 
       // If not a pseudo-op, note non-leaf or can throw
     if (static_cast<int>(mir->dalvikInsn.opcode) <
@@ -1013,7 +1013,7 @@
       int flags = Instruction::FlagsOf(mir->dalvikInsn.opcode);
 
       if (flags & Instruction::kInvoke) {
-        cu_->attributes &= ~METHOD_IS_LEAF;
+        attributes_ &= ~METHOD_IS_LEAF;
       }
     }
 
@@ -1114,19 +1114,17 @@
     }
   }
 
-  if (!cu_->disable_dataflow) {
-    /*
-     * Take a snapshot of Dalvik->SSA mapping at the end of each block. The
-     * input to PHI nodes can be derived from the snapshot of all
-     * predecessor blocks.
-     */
-    bb->data_flow_info->vreg_to_ssa_map =
-        static_cast<int*>(NewMem(cu_, sizeof(int) * cu_->num_dalvik_registers, false,
-                                 kAllocDFInfo));
+  /*
+   * Take a snapshot of Dalvik->SSA mapping at the end of each block. The
+   * input to PHI nodes can be derived from the snapshot of all
+   * predecessor blocks.
+   */
+  bb->data_flow_info->vreg_to_ssa_map =
+      static_cast<int*>(NewMem(cu_, sizeof(int) * cu_->num_dalvik_registers, false,
+                               kAllocDFInfo));
 
-    memcpy(bb->data_flow_info->vreg_to_ssa_map, vreg_to_ssa_map_,
-           sizeof(int) * cu_->num_dalvik_registers);
-  }
+  memcpy(bb->data_flow_info->vreg_to_ssa_map, vreg_to_ssa_map_,
+         sizeof(int) * cu_->num_dalvik_registers);
   return true;
 }
 
@@ -1163,7 +1161,7 @@
   for (i = 0; i < num_dalvik_reg; i++) {
     InsertGrowableList(cu_, ssa_base_vregs_, i);
     InsertGrowableList(cu_, ssa_subscripts_, 0);
-    std::string ssa_name = GetSSAName(cu_, i);
+    std::string ssa_name = GetSSAName(i);
     char* name = static_cast<char*>(NewMem(cu_, ssa_name.length() + 1, true, kAllocDFInfo));
     strncpy(name, ssa_name.c_str(), ssa_name.length() + 1);
     InsertGrowableList(cu_, ssa_strings_, reinterpret_cast<uintptr_t>(name));
@@ -1185,7 +1183,7 @@
   }
 
   /* Add ssa reg for Method* */
-  cu_->method_sreg = AddNewSReg(SSA_METHOD_BASEREG);
+  method_sreg_ = AddNewSReg(SSA_METHOD_BASEREG);
 
   /*
    * Allocate the BasicBlockDataFlow structure for the entry and code blocks
@@ -1283,7 +1281,7 @@
       use_counts_.elem_list[s_reg] += (1 << weight);
     }
     if (!(cu_->disable_opt & (1 << kPromoteCompilerTemps))) {
-      int df_attributes = oat_data_flow_attributes[mir->dalvikInsn.opcode];
+      int df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
       // Implicit use of Method* ? */
       if (df_attributes & DF_UMS) {
         /*
@@ -1298,8 +1296,8 @@
           uses_method_star &= InvokeUsesMethodStar(mir);
         }
         if (uses_method_star) {
-          raw_use_counts_.elem_list[cu_->method_sreg]++;
-          use_counts_.elem_list[cu_->method_sreg] += (1 << weight);
+          raw_use_counts_.elem_list[method_sreg_]++;
+          use_counts_.elem_list[method_sreg_] += (1 << weight);
         }
       }
     }
diff --git a/src/compiler/dex/mir_graph.cc b/src/compiler/dex/mir_graph.cc
index 71aaa38..a6e9556 100644
--- a/src/compiler/dex/mir_graph.cc
+++ b/src/compiler/dex/mir_graph.cc
@@ -53,8 +53,26 @@
   {{Instruction::RETURN_WIDE}, kIdentity},
 };
 
+const char* MIRGraph::extended_mir_op_names_[kMirOpLast - kMirOpFirst] = {
+  "Phi",
+  "Copy",
+  "FusedCmplFloat",
+  "FusedCmpgFloat",
+  "FusedCmplDouble",
+  "FusedCmpgDouble",
+  "FusedCmpLong",
+  "Nop",
+  "OpNullCheck",
+  "OpRangeCheck",
+  "OpDivZeroCheck",
+  "Check1",
+  "Check2",
+  "Select",
+};
+
 MIRGraph::MIRGraph(CompilationUnit* cu)
-    : cu_(cu),
+    : reg_location_(NULL),
+      cu_(cu),
       ssa_base_vregs_(NULL),
       ssa_subscripts_(NULL),
       ssa_strings_(NULL),
@@ -78,7 +96,10 @@
       current_offset_(kInvalidEntry),
       def_count_(0),
       opcode_count_(NULL),
-      num_ssa_regs_(0) {
+      num_ssa_regs_(0),
+      method_sreg_(0),
+      attributes_(METHOD_IS_LEAF)  // Start with leaf assumption, change on encountering invoke.
+      {
   CompilerInitGrowableList(cu, &block_list_, 0, kListBlockList);
   try_block_addr_ = AllocBitVector(cu, 0, true /* expandable */);
 }
@@ -623,7 +644,7 @@
     code_ptr += width;
     int flags = Instruction::FlagsOf(insn->dalvikInsn.opcode);
 
-    int df_flags = oat_data_flow_attributes[insn->dalvikInsn.opcode];
+    int df_flags = oat_data_flow_attributes_[insn->dalvikInsn.opcode];
 
     if (df_flags & DF_HAS_DEFS) {
       def_count_ += (df_flags & DF_A_WIDE) ? 2 : 1;
@@ -684,7 +705,7 @@
   }
 
   if (cu_->verbose) {
-    DumpCompilationUnit(cu_);
+    DumpMIRGraph();
   }
 }
 
@@ -738,9 +759,9 @@
         for (mir = bb->first_mir_insn; mir; mir = mir->next) {
             int opcode = mir->dalvikInsn.opcode;
             fprintf(file, "    {%04x %s %s %s\\l}%s\\\n", mir->offset,
-                    mir->ssa_rep ? GetDalvikDisassembly(cu_, mir) :
+                    mir->ssa_rep ? GetDalvikDisassembly(mir) :
                     (opcode < kMirOpFirst) ?  Instruction::Name(mir->dalvikInsn.opcode) :
-                    extended_mir_op_names[opcode - kMirOpFirst],
+                    extended_mir_op_names_[opcode - kMirOpFirst],
                     (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0 ? " no_rangecheck" : " ",
                     (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0 ? " no_nullcheck" : " ",
                     mir->next ? " | " : " ");
@@ -837,4 +858,315 @@
   fclose(file);
 }
 
+/* Insert an MIR instruction to the end of a basic block */
+void MIRGraph::AppendMIR(BasicBlock* bb, MIR* mir)
+{
+  if (bb->first_mir_insn == NULL) {
+    DCHECK(bb->last_mir_insn == NULL);
+    bb->last_mir_insn = bb->first_mir_insn = mir;
+    mir->prev = mir->next = NULL;
+  } else {
+    bb->last_mir_insn->next = mir;
+    mir->prev = bb->last_mir_insn;
+    mir->next = NULL;
+    bb->last_mir_insn = mir;
+  }
+}
+
+/* Insert an MIR instruction to the head of a basic block */
+void MIRGraph::PrependMIR(BasicBlock* bb, MIR* mir)
+{
+  if (bb->first_mir_insn == NULL) {
+    DCHECK(bb->last_mir_insn == NULL);
+    bb->last_mir_insn = bb->first_mir_insn = mir;
+    mir->prev = mir->next = NULL;
+  } else {
+    bb->first_mir_insn->prev = mir;
+    mir->next = bb->first_mir_insn;
+    mir->prev = NULL;
+    bb->first_mir_insn = mir;
+  }
+}
+
+/* Insert a MIR instruction after the specified MIR */
+void MIRGraph::InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir)
+{
+  new_mir->prev = current_mir;
+  new_mir->next = current_mir->next;
+  current_mir->next = new_mir;
+
+  if (new_mir->next) {
+    /* Is not the last MIR in the block */
+    new_mir->next->prev = new_mir;
+  } else {
+    /* Is the last MIR in the block */
+    bb->last_mir_insn = new_mir;
+  }
+}
+
+char* MIRGraph::GetDalvikDisassembly(const MIR* mir)
+{
+  DecodedInstruction insn = mir->dalvikInsn;
+  std::string str;
+  int flags = 0;
+  int opcode = insn.opcode;
+  char* ret;
+  bool nop = false;
+  SSARepresentation* ssa_rep = mir->ssa_rep;
+  Instruction::Format dalvik_format = Instruction::k10x;  // Default to no-operand format
+  int defs = (ssa_rep != NULL) ? ssa_rep->num_defs : 0;
+  int uses = (ssa_rep != NULL) ? ssa_rep->num_uses : 0;
+
+  // Handle special cases.
+  if ((opcode == kMirOpCheck) || (opcode == kMirOpCheckPart2)) {
+    str.append(extended_mir_op_names_[opcode - kMirOpFirst]);
+    str.append(": ");
+    // Recover the original Dex instruction
+    insn = mir->meta.throw_insn->dalvikInsn;
+    ssa_rep = mir->meta.throw_insn->ssa_rep;
+    defs = ssa_rep->num_defs;
+    uses = ssa_rep->num_uses;
+    opcode = insn.opcode;
+  } else if (opcode == kMirOpNop) {
+    str.append("[");
+    insn.opcode = mir->meta.original_opcode;
+    opcode = mir->meta.original_opcode;
+    nop = true;
+  }
+
+  if (opcode >= kMirOpFirst) {
+    str.append(extended_mir_op_names_[opcode - kMirOpFirst]);
+  } else {
+    dalvik_format = Instruction::FormatOf(insn.opcode);
+    flags = Instruction::FlagsOf(insn.opcode);
+    str.append(Instruction::Name(insn.opcode));
+  }
+
+  if (opcode == kMirOpPhi) {
+    int* incoming = reinterpret_cast<int*>(insn.vB);
+    str.append(StringPrintf(" %s = (%s",
+               GetSSANameWithConst(ssa_rep->defs[0], true).c_str(),
+               GetSSANameWithConst(ssa_rep->uses[0], true).c_str()));
+    str.append(StringPrintf(":%d",incoming[0]));
+    int i;
+    for (i = 1; i < uses; i++) {
+      str.append(StringPrintf(", %s:%d",
+                              GetSSANameWithConst(ssa_rep->uses[i], true).c_str(),
+                              incoming[i]));
+    }
+    str.append(")");
+  } else if ((flags & Instruction::kBranch) != 0) {
+    // For branches, decode the instructions to print out the branch targets.
+    int offset = 0;
+    switch (dalvik_format) {
+      case Instruction::k21t:
+        str.append(StringPrintf(" %s,", GetSSANameWithConst(ssa_rep->uses[0], false).c_str()));
+        offset = insn.vB;
+        break;
+      case Instruction::k22t:
+        str.append(StringPrintf(" %s, %s,", GetSSANameWithConst(ssa_rep->uses[0], false).c_str(),
+                   GetSSANameWithConst(ssa_rep->uses[1], false).c_str()));
+        offset = insn.vC;
+        break;
+      case Instruction::k10t:
+      case Instruction::k20t:
+      case Instruction::k30t:
+        offset = insn.vA;
+        break;
+      default:
+        LOG(FATAL) << "Unexpected branch format " << dalvik_format << " from " << insn.opcode;
+    }
+    str.append(StringPrintf(" 0x%x (%c%x)", mir->offset + offset,
+                            offset > 0 ? '+' : '-', offset > 0 ? offset : -offset));
+  } else {
+    // For invokes-style formats, treat wide regs as a pair of singles
+    bool show_singles = ((dalvik_format == Instruction::k35c) ||
+                         (dalvik_format == Instruction::k3rc));
+    if (defs != 0) {
+      str.append(StringPrintf(" %s", GetSSANameWithConst(ssa_rep->defs[0], false).c_str()));
+      if (uses != 0) {
+        str.append(", ");
+      }
+    }
+    for (int i = 0; i < uses; i++) {
+      str.append(
+          StringPrintf(" %s", GetSSANameWithConst(ssa_rep->uses[i], show_singles).c_str()));
+      if (!show_singles && (reg_location_ != NULL) && reg_location_[i].wide) {
+        // For the listing, skip the high sreg.
+        i++;
+      }
+      if (i != (uses - 1)) {
+        str.append(",");
+      }
+    }
+    switch (dalvik_format) {
+      case Instruction::k11n: // Add one immediate from vB
+      case Instruction::k21s:
+      case Instruction::k31i:
+      case Instruction::k21h:
+        str.append(StringPrintf(", #%d", insn.vB));
+        break;
+      case Instruction::k51l: // Add one wide immediate
+        str.append(StringPrintf(", #%lld", insn.vB_wide));
+        break;
+      case Instruction::k21c: // One register, one string/type/method index
+      case Instruction::k31c:
+        str.append(StringPrintf(", index #%d", insn.vB));
+        break;
+      case Instruction::k22c: // Two registers, one string/type/method index
+        str.append(StringPrintf(", index #%d", insn.vC));
+        break;
+      case Instruction::k22s: // Add one immediate from vC
+      case Instruction::k22b:
+        str.append(StringPrintf(", #%d", insn.vC));
+        break;
+      default:
+        ; // Nothing left to print
+      }
+  }
+  if (nop) {
+    str.append("]--optimized away");
+  }
+  int length = str.length() + 1;
+  ret = static_cast<char*>(NewMem(cu_, length, false, kAllocDFInfo));
+  strncpy(ret, str.c_str(), length);
+  return ret;
+}
+
+/* Turn method name into a legal Linux file name */
+void MIRGraph::ReplaceSpecialChars(std::string& str)
+{
+  static const struct { const char before; const char after; } match[] =
+      {{'/','-'}, {';','#'}, {' ','#'}, {'$','+'},
+       {'(','@'}, {')','@'}, {'<','='}, {'>','='}};
+  for (unsigned int i = 0; i < sizeof(match)/sizeof(match[0]); i++) {
+    std::replace(str.begin(), str.end(), match[i].before, match[i].after);
+  }
+}
+
+std::string MIRGraph::GetSSAName(int ssa_reg)
+{
+  return StringPrintf("v%d_%d", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg));
+}
+
+// Similar to GetSSAName, but if ssa name represents an immediate show that as well.
+std::string MIRGraph::GetSSANameWithConst(int ssa_reg, bool singles_only)
+{
+  if (reg_location_ == NULL) {
+    // Pre-SSA - just use the standard name
+    return GetSSAName(ssa_reg);
+  }
+  if (IsConst(reg_location_[ssa_reg])) {
+    if (!singles_only && reg_location_[ssa_reg].wide) {
+      return StringPrintf("v%d_%d#0x%llx", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg),
+                          ConstantValueWide(reg_location_[ssa_reg]));
+    } else {
+      return StringPrintf("v%d_%d#0x%x", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg),
+                          ConstantValue(reg_location_[ssa_reg]));
+    }
+  } else {
+    return StringPrintf("v%d_%d", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg));
+  }
+}
+
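+// Name a basic block for dumps, e.g. (illustrative) a Dalvik code block with
+// id 7 starting at offset 0x1a is named "block001a_7".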
+void MIRGraph::GetBlockName(BasicBlock* bb, char* name)
+{
+  switch (bb->block_type) {
+    case kEntryBlock:
+      snprintf(name, BLOCK_NAME_LEN, "entry_%d", bb->id);
+      break;
+    case kExitBlock:
+      snprintf(name, BLOCK_NAME_LEN, "exit_%d", bb->id);
+      break;
+    case kDalvikByteCode:
+      snprintf(name, BLOCK_NAME_LEN, "block%04x_%d", bb->start_offset, bb->id);
+      break;
+    case kExceptionHandling:
+      snprintf(name, BLOCK_NAME_LEN, "exception%04x_%d", bb->start_offset,
+               bb->id);
+      break;
+    default:
+      snprintf(name, BLOCK_NAME_LEN, "_%d", bb->id);
+      break;
+  }
+}
+
+const char* MIRGraph::GetShortyFromTargetIdx(int target_idx)
+{
+  // FIXME: use current code unit for inline support.
+  const DexFile::MethodId& method_id = cu_->dex_file->GetMethodId(target_idx);
+  return cu_->dex_file->GetShorty(method_id.proto_idx_);
+}
+
+/* Debug Utility - dump a compilation unit */
+void MIRGraph::DumpMIRGraph()
+{
+  BasicBlock* bb;
+  const char* block_type_names[] = {
+    "Entry Block",
+    "Code Block",
+    "Exit Block",
+    "Exception Handling",
+    "Catch Block"
+  };
+
+  LOG(INFO) << "Compiling " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
+  LOG(INFO) << cu_->insns << " insns";
+  LOG(INFO) << GetNumBlocks() << " blocks in total";
+  GrowableListIterator iterator = GetBasicBlockIterator();
+
+  while (true) {
+    bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iterator));
+    if (bb == NULL) break;
+    LOG(INFO) << StringPrintf("Block %d (%s) (insn %04x - %04x%s)",
+        bb->id,
+        block_type_names[bb->block_type],
+        bb->start_offset,
+        bb->last_mir_insn ? bb->last_mir_insn->offset : bb->start_offset,
+        bb->last_mir_insn ? "" : " empty");
+    if (bb->taken) {
+      LOG(INFO) << "  Taken branch: block " << bb->taken->id
+                << " (0x" << std::hex << bb->taken->start_offset << ")";
+    }
+    if (bb->fall_through) {
+      LOG(INFO) << "  Fallthrough : block " << bb->fall_through->id
+                << " (0x" << std::hex << bb->fall_through->start_offset << ")";
+    }
+  }
+}
+
+/*
+ * Build an array of location records for the incoming arguments.
+ * Note: one location record per word of arguments, with dummy
+ * high-word loc for wide arguments.  Also pull up any following
+ * MOVE_RESULT and incorporate it into the invoke.
+ */
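+// Illustrative: for "invoke-virtual {v0, v1}" followed by "move-result v2", the
+// returned CallInfo records v2's location as result and the MOVE_RESULT is NOP'd.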
+CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type,
+                                  bool is_range)
+{
+  CallInfo* info = static_cast<CallInfo*>(NewMem(cu_, sizeof(CallInfo), true, kAllocMisc));
+  MIR* move_result_mir = FindMoveResult(bb, mir);
+  if (move_result_mir == NULL) {
+    info->result.location = kLocInvalid;
+  } else {
+    info->result = GetRawDest(move_result_mir);
+    move_result_mir->meta.original_opcode = move_result_mir->dalvikInsn.opcode;
+    move_result_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
+  }
+  info->num_arg_words = mir->ssa_rep->num_uses;
+  info->args = (info->num_arg_words == 0) ? NULL : static_cast<RegLocation*>
+      (NewMem(cu_, sizeof(RegLocation) * info->num_arg_words, false, kAllocMisc));
+  for (int i = 0; i < info->num_arg_words; i++) {
+    info->args[i] = GetRawSrc(mir, i);
+  }
+  info->opt_flags = mir->optimization_flags;
+  info->type = type;
+  info->is_range = is_range;
+  info->index = mir->dalvikInsn.vB;
+  info->offset = mir->offset;
+  return info;
+}
+
 } // namespace art
diff --git a/src/compiler/dex/mir_graph.h b/src/compiler/dex/mir_graph.h
index 8329010..514205d 100644
--- a/src/compiler/dex/mir_graph.h
+++ b/src/compiler/dex/mir_graph.h
@@ -114,7 +114,191 @@
 #define DF_IS_GETTER_OR_SETTER  (DF_IS_GETTER | DF_IS_SETTER)
 #define DF_USES_FP              (DF_FP_A | DF_FP_B | DF_FP_C)
 
-extern const int oat_data_flow_attributes[kMirOpLast];
+enum OatMethodAttributes {
+  kIsLeaf,            // Method is leaf.
+  kHasLoop,           // Method contains simple loop.
+};
+
+#define METHOD_IS_LEAF          (1 << kIsLeaf)
+#define METHOD_HAS_LOOP         (1 << kHasLoop)
+
+// Minimum field size to contain Dalvik v_reg number.
+#define VREG_NUM_WIDTH 16
+
+#define INVALID_SREG (-1)
+#define INVALID_VREG (0xFFFFU)
+#define INVALID_REG (0xFF)
+#define INVALID_OFFSET (0xDEADF00FU)
+
+/* SSA encodings for special registers */
+#define SSA_METHOD_BASEREG (-2)
+/* First compiler temp basereg, grows smaller */
+#define SSA_CTEMP_BASEREG (SSA_METHOD_BASEREG - 1)
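+/* Illustrative: the first compiler temp uses basereg -3, the next -4, and so on. */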
+
+#define MIR_IGNORE_NULL_CHECK           (1 << kMIRIgnoreNullCheck)
+#define MIR_NULL_CHECK_ONLY             (1 << kMIRNullCheckOnly)
+#define MIR_IGNORE_RANGE_CHECK          (1 << kMIRIgnoreRangeCheck)
+#define MIR_RANGE_CHECK_ONLY            (1 << kMIRRangeCheckOnly)
+#define MIR_INLINED                     (1 << kMIRInlined)
+#define MIR_INLINED_PRED                (1 << kMIRInlinedPred)
+#define MIR_CALLEE                      (1 << kMIRCallee)
+#define MIR_IGNORE_SUSPEND_CHECK        (1 << kMIRIgnoreSuspendCheck)
+#define MIR_DUP                         (1 << kMIRDup)
+
+/*
+ * In general, vreg/sreg describe Dalvik registers that originated with dx.  However,
+ * it is useful to have compiler-generated temporary registers and have them treated
+ * in the same manner as dx-generated virtual registers.  This struct records the SSA
+ * name of compiler-introduced temporaries.
+ */
+struct CompilerTemp {
+  int s_reg;
+};
+
+// When debug option enabled, records effectiveness of null and range check elimination.
+struct Checkstats {
+  int null_checks;
+  int null_checks_eliminated;
+  int range_checks;
+  int range_checks_eliminated;
+};
+
+// Dataflow attributes of a basic block.
+struct BasicBlockDataFlow {
+  ArenaBitVector* use_v;
+  ArenaBitVector* def_v;
+  ArenaBitVector* live_in_v;
+  ArenaBitVector* phi_v;
+  int* vreg_to_ssa_map;
+  ArenaBitVector* ending_null_check_v;
+};
+
+/*
+ * Normalized use/def for a MIR operation using SSA names rather than vregs.  Note that
+ * uses/defs retain the Dalvik convention that long operations operate on a pair of 32-bit
+ * vregs.  For example, "ADD_LONG v0, v2, v3" would have 2 defs (v0/v1) and 4 uses (v2/v3, v3/v4).
+ * Following SSA renaming, this is the primary struct used by code generators to locate
+ * operand and result registers.  This is a somewhat confusing and unhelpful convention that
+ * we may want to revisit in the future.
+ */
+struct SSARepresentation {
+  int num_uses;
+  int* uses;
+  bool* fp_use;
+  int num_defs;
+  int* defs;
+  bool* fp_def;
+};
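+// For the ADD_LONG example above, num_defs == 2 and num_uses == 4, with uses[]
+// holding the SSA names of v2/v3 and v3/v4 in order.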
+
+/*
+ * The mid-level intermediate representation (MIR) node, which may largely be
+ * considered a wrapper around a Dalvik bytecode instruction.
+ */
+struct MIR {
+  DecodedInstruction dalvikInsn;
+  unsigned int width;
+  unsigned int offset;
+  int m_unit_index;               // From which method was this MIR included
+  MIR* prev;
+  MIR* next;
+  SSARepresentation* ssa_rep;
+  int optimization_flags;
+  union {
+    // Establish link between two halves of throwing instructions.
+    MIR* throw_insn;
+    // Saved opcode for NOP'd MIRs
+    Instruction::Code original_opcode;
+  } meta;
+};
+
+struct BasicBlock {
+  int id;
+  int dfs_id;
+  bool visited;
+  bool hidden;
+  bool catch_entry;
+  bool explicit_throw;
+  bool conditional_branch;
+  bool terminated_by_return;        // Block ends with a Dalvik return opcode.
+  bool dominates_return;            // Is a member of return extended basic block.
+  uint16_t start_offset;
+  uint16_t nesting_depth;
+  BBType block_type;
+  MIR* first_mir_insn;
+  MIR* last_mir_insn;
+  BasicBlock* fall_through;
+  BasicBlock* taken;
+  BasicBlock* i_dom;                // Immediate dominator.
+  BasicBlockDataFlow* data_flow_info;
+  GrowableList* predecessors;
+  ArenaBitVector* dominators;
+  ArenaBitVector* i_dominated;      // Set nodes being immediately dominated.
+  ArenaBitVector* dom_frontier;     // Dominance frontier.
+  struct {                          // For one-to-many successors, like
+    BlockListType block_list_type;  // switch and exception handling.
+    GrowableList blocks;
+  } successor_block_list;
+};
+
+/*
+ * The "blocks" field in "successor_block_list" points to an array of elements with the type
+ * "SuccessorBlockInfo".  For catch blocks, key is type index for the exception.  For switch
+ * blocks, key is the case value.
+ */
+struct SuccessorBlockInfo {
+  BasicBlock* block;
+  int key;
+};
+
+/*
+ * Whereas an SSA name describes a definition of a Dalvik vreg, the RegLocation describes
+ * the type of an SSA name, and can also be used by code generators to record where the
+ * value is located (i.e. physical register, frame, spill, etc.).  For each SSA name (SReg)
+ * there is a RegLocation.
+ * FIXME: The orig_sreg field was added as a workaround for llvm bitcode generation.  With
+ * the latest restructuring, we should be able to remove it and rely on s_reg_low throughout.
+ */
+struct RegLocation {
+  RegLocationType location:3;
+  unsigned wide:1;
+  unsigned defined:1;   // Do we know the type?
+  unsigned is_const:1;  // Constant, value in mir_graph->constant_values[].
+  unsigned fp:1;        // Floating point?
+  unsigned core:1;      // Non-floating point?
+  unsigned ref:1;       // Something GC cares about.
+  unsigned high_word:1; // High word of pair?
+  unsigned home:1;      // Does this represent the home location?
+  uint8_t low_reg;      // First physical register.
+  uint8_t high_reg;     // 2nd physical register (if wide).
+  int32_t s_reg_low;    // SSA name for low Dalvik word.
+  int32_t orig_sreg;    // TODO: remove after Bitcode gen complete
+                        // and consolidate usage w/ s_reg_low.
+};
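+// Illustrative: a long promoted to the physical register pair r2/r3, with low SSA
+// name 10, would carry location == kLocPhysReg, wide == 1, low_reg == r2,
+// high_reg == r3 and s_reg_low == 10.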
+
+/*
+ * Collection of information describing an invoke, and the destination of
+ * the subsequent MOVE_RESULT (if applicable).  Collected as a unit to enable
+ * more efficient invoke code generation.
+ */
+struct CallInfo {
+  int num_arg_words;    // Note: word count, not arg count.
+  RegLocation* args;    // One for each word of arguments.
+  RegLocation result;   // Eventual target of MOVE_RESULT.
+  int opt_flags;
+  InvokeType type;
+  uint32_t dex_idx;
+  uint32_t index;       // Method idx for invokes, type idx for FilledNewArray.
+  uintptr_t direct_code;
+  uintptr_t direct_method;
+  RegLocation target;    // Target of following move_result.
+  bool skip_this;
+  bool is_range;
+  int offset;            // Dalvik offset.
+};
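+// Illustrative: an invoke-virtual passing "this" plus one long argument uses three
+// vregs, so num_arg_words == 3 even though the argument count is 2.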
+
+
+const RegLocation bad_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0,
+                             INVALID_REG, INVALID_REG, INVALID_SREG, INVALID_SREG};
 
 class MIRGraph {
  public:
@@ -213,8 +397,6 @@
 
   void DumpRegLocTable(RegLocation* table, int count);
 
-  int ComputeFrameSize();
-
   void BasicBlockOptimization();
 
   bool IsConst(int32_t s_reg) const {
@@ -273,6 +455,69 @@
     return GET_ELEM_N(ssa_strings_, char*, ssa_reg);
   }
 
+  RegLocation GetRawSrc(MIR* mir, int num)
+  {
+    DCHECK(num < mir->ssa_rep->num_uses);
+    RegLocation res = reg_location_[mir->ssa_rep->uses[num]];
+    return res;
+  }
+
+  RegLocation GetRawDest(MIR* mir)
+  {
+    DCHECK_GT(mir->ssa_rep->num_defs, 0);
+    RegLocation res = reg_location_[mir->ssa_rep->defs[0]];
+    return res;
+  }
+
+  RegLocation GetDest(MIR* mir)
+  {
+    RegLocation res = GetRawDest(mir);
+    DCHECK(!res.wide);
+    return res;
+  }
+
+  RegLocation GetSrc(MIR* mir, int num)
+  {
+    RegLocation res = GetRawSrc(mir, num);
+    DCHECK(!res.wide);
+    return res;
+  }
+
+  RegLocation GetDestWide(MIR* mir)
+  {
+    RegLocation res = GetRawDest(mir);
+    DCHECK(res.wide);
+    return res;
+  }
+
+  RegLocation GetSrcWide(MIR* mir, int low)
+  {
+    RegLocation res = GetRawSrc(mir, low);
+    DCHECK(res.wide);
+    return res;
+  }
+
+  RegLocation GetBadLoc() {
+    return bad_loc;
+  }
+
+  int GetMethodSReg() {
+    return method_sreg_;
+  }
+
+  bool MethodIsLeaf() {
+    return attributes_ & METHOD_IS_LEAF;
+  }
+
+  RegLocation GetRegLocation(int index) {
+    DCHECK((index >= 0) && (index < num_ssa_regs_));
+    return reg_location_[index];
+  }
+
+  RegLocation GetMethodLoc() {
+    return reg_location_[method_sreg_];
+  }
+
   void BasicBlockCombine();
   void CodeLayout();
   void DumpCheckStats();
@@ -284,6 +529,23 @@
   void SSATransformation();
   void CheckForDominanceFrontier(BasicBlock* dom_bb, const BasicBlock* succ_bb);
   void NullCheckElimination();
+  bool SetFp(int index, bool is_fp);
+  bool SetCore(int index, bool is_core);
+  bool SetRef(int index, bool is_ref);
+  bool SetWide(int index, bool is_wide);
+  bool SetHigh(int index, bool is_high);
+  void AppendMIR(BasicBlock* bb, MIR* mir);
+  void PrependMIR(BasicBlock* bb, MIR* mir);
+  void InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir);
+  char* GetDalvikDisassembly(const MIR* mir);
+
+  void ReplaceSpecialChars(std::string& str);
+  std::string GetSSAName(int ssa_reg);
+  std::string GetSSANameWithConst(int ssa_reg, bool singles_only);
+  void GetBlockName(BasicBlock* bb, char* name);
+  const char* GetShortyFromTargetIdx(int);
+  void DumpMIRGraph();
+  CallInfo* NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, bool is_range);
 
   /*
    * IsDebugBuild sanity check: keep track of the Dex PCs for catch entries so that later on
@@ -291,6 +553,14 @@
    */
    std::set<uint32_t> catches_;
 
+   // TODO: make these private.
+   RegLocation* reg_location_;                         // Map SSA names to location.
+   GrowableList compiler_temps_;
+   SafeMap<unsigned int, unsigned int> block_id_map_;  // Block collapse lookup cache.
+
+   static const int oat_data_flow_attributes_[kMirOpLast];
+   static const char* extended_mir_op_names_[kMirOpLast - kMirOpFirst];
+
  private:
 
    int FindCommonParent(int block1, int block2);
@@ -393,6 +663,9 @@
    int* opcode_count_;                            // Dex opcode coverage stats.
    int num_ssa_regs_;                             // Number of names following SSA transformation.
    std::vector<BasicBlock*> extended_basic_blocks_; // Heads of block "traces".
+   int method_sreg_;
+   unsigned int attributes_;
+   Checkstats* checkstats_;
 };
 
 }  // namespace art
diff --git a/src/compiler/dex/mir_optimization.cc b/src/compiler/dex/mir_optimization.cc
index 759dc32..51b9d9d 100644
--- a/src/compiler/dex/mir_optimization.cc
+++ b/src/compiler/dex/mir_optimization.cc
@@ -45,7 +45,7 @@
   ArenaBitVector *is_constant_v = is_constant_v_;
 
   for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
-    int df_attributes = oat_data_flow_attributes[mir->dalvikInsn.opcode];
+    int df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
 
     DecodedInstruction *d_insn = &mir->dalvikInsn;
 
@@ -107,7 +107,6 @@
   for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
     DoConstantPropogation(bb);
   }
-
 }
 
 /* Advance to next strictly dominated MIR node in an extended basic block */
@@ -236,7 +235,7 @@
         case Instruction::CMPG_FLOAT:
         case Instruction::CMPG_DOUBLE:
         case Instruction::CMP_LONG:
-          if (cu_->gen_bitcode) {
+          if ((cu_->disable_opt & (1 << kBranchFusing)) != 0) {
             // Bitcode doesn't allow this optimization.
             break;
           }
@@ -332,7 +331,7 @@
       // Is this the select pattern?
       // TODO: flesh out support for Mips and X86.  NOTE: llvm's select op doesn't quite work here.
       // TUNING: expand to support IF_xx compare & branches
-      if (!cu_->gen_bitcode && (cu_->instruction_set == kThumb2) &&
+      if (!(cu_->compiler_backend == kPortable) && (cu_->instruction_set == kThumb2) &&
           ((mir->dalvikInsn.opcode == Instruction::IF_EQZ) ||
           (mir->dalvikInsn.opcode == Instruction::IF_NEZ))) {
         BasicBlock* ft = bb->fall_through;
@@ -485,18 +484,17 @@
     if (mir->ssa_rep == NULL) {
       continue;
     }
-    int df_attributes = oat_data_flow_attributes[mir->dalvikInsn.opcode];
+    int df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
     if (df_attributes & DF_HAS_NULL_CHKS) {
-      //TODO: move checkstats to mir_graph
-      cu_->checkstats->null_checks++;
+      checkstats_->null_checks++;
       if (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) {
-        cu_->checkstats->null_checks_eliminated++;
+        checkstats_->null_checks_eliminated++;
       }
     }
     if (df_attributes & DF_HAS_RANGE_CHKS) {
-      cu_->checkstats->range_checks++;
+      checkstats_->range_checks++;
       if (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) {
-        cu_->checkstats->range_checks_eliminated++;
+        checkstats_->range_checks_eliminated++;
       }
     }
   }
@@ -572,7 +570,7 @@
     MIR* mir = bb->last_mir_insn;
     // Grab the attributes from the paired opcode
     MIR* throw_insn = mir->meta.throw_insn;
-    int df_attributes = oat_data_flow_attributes[throw_insn->dalvikInsn.opcode];
+    int df_attributes = oat_data_flow_attributes_[throw_insn->dalvikInsn.opcode];
     bool can_combine = true;
     if (df_attributes & DF_HAS_NULL_CHKS) {
       can_combine &= ((throw_insn->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0);
@@ -613,7 +611,7 @@
 
     // Kill bb_next and remap now-dead id to parent
     bb_next->block_type = kDead;
-    cu_->block_id_map.Overwrite(bb_next->id, bb->id);
+    block_id_map_.Overwrite(bb_next->id, bb->id);
 
     // Now, loop back and see if we can keep going
   }
@@ -661,7 +659,7 @@
     if (mir->ssa_rep == NULL) {
         continue;
     }
-    int df_attributes = oat_data_flow_attributes[mir->dalvikInsn.opcode];
+    int df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
 
     // Mark target of NEW* as non-null
     if (df_attributes & DF_NON_NULL_DST) {
@@ -782,6 +780,9 @@
 
 void MIRGraph::CodeLayout()
 {
+  if (cu_->enable_debug & (1 << kDebugVerifyDataflow)) {
+    VerifyDataflow();
+  }
   AllNodesIterator iter(this, false /* not iterative */);
   for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
     LayoutBlocks(bb);
@@ -795,7 +796,7 @@
 {
   Checkstats* stats =
       static_cast<Checkstats*>(NewMem(cu_, sizeof(Checkstats), true, kAllocDFInfo));
-  cu_->checkstats = stats;
+  checkstats_ = stats;
   AllNodesIterator iter(this, false /* not iterative */);
   for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
     CountChecks(bb);
@@ -850,7 +851,7 @@
 void MIRGraph::BasicBlockOptimization()
 {
   if (!(cu_->disable_opt & (1 << kBBOpt))) {
-    CompilerInitGrowableList(cu_, &cu_->compiler_temps, 6, kListMisc);
+    CompilerInitGrowableList(cu_, &compiler_temps_, 6, kListMisc);
     DCHECK_EQ(cu_->num_compiler_temps, 0);
     // Mark all blocks as not visited
     AllNodesIterator iter(this, false /* not iterative */);
diff --git a/src/compiler/dex/portable/mir_to_gbc.cc b/src/compiler/dex/portable/mir_to_gbc.cc
index af8e459..6dcdfcf 100644
--- a/src/compiler/dex/portable/mir_to_gbc.cc
+++ b/src/compiler/dex/portable/mir_to_gbc.cc
@@ -29,46 +29,44 @@
 
 #include "compiler/dex/compiler_internals.h"
 #include "compiler/dex/dataflow_iterator.h"
+#include "compiler/dex/frontend.h"
+#include "mir_to_gbc.h"
 
-//TODO: move gbc_to_lir code into quick directory (if necessary).
-#include "compiler/dex/quick/codegen_util.h"
-#include "compiler/dex/quick/local_optimizations.h"
-#include "compiler/dex/quick/ralloc_util.h"
 #include "compiler/llvm/llvm_compilation_unit.h"
 #include "compiler/llvm/utils_llvm.h"
 
-static const char* kLabelFormat = "%c0x%x_%d";
-static const char kInvalidBlock = 0xff;
-static const char kNormalBlock = 'L';
-static const char kCatchBlock = 'C';
+const char* kLabelFormat = "%c0x%x_%d";
+const char kInvalidBlock = 0xff;
+const char kNormalBlock = 'L';
+const char kCatchBlock = 'C';
 
 namespace art {
 
-static ::llvm::BasicBlock* GetLLVMBlock(CompilationUnit* cu, int id)
+::llvm::BasicBlock* MirConverter::GetLLVMBlock(int id)
 {
-  return cu->id_to_block_map.Get(id);
+  return id_to_block_map_.Get(id);
 }
 
-static ::llvm::Value* GetLLVMValue(CompilationUnit* cu, int s_reg)
+::llvm::Value* MirConverter::GetLLVMValue(int s_reg)
 {
-  return reinterpret_cast< ::llvm::Value*>(GrowableListGetElement(&cu->llvm_values, s_reg));
+  return reinterpret_cast< ::llvm::Value*>(GrowableListGetElement(&llvm_values_, s_reg));
 }
 
-static void SetVregOnValue(CompilationUnit* cu, ::llvm::Value* val, int s_reg)
+void MirConverter::SetVregOnValue(::llvm::Value* val, int s_reg)
 {
   // Set vreg for debugging
   art::llvm::IntrinsicHelper::IntrinsicId id = art::llvm::IntrinsicHelper::SetVReg;
-  ::llvm::Function* func = cu->intrinsic_helper->GetIntrinsicFunction(id);
-  int v_reg = cu->mir_graph->SRegToVReg(s_reg);
-  ::llvm::Value* table_slot = cu->irb->getInt32(v_reg);
+  ::llvm::Function* func = intrinsic_helper_->GetIntrinsicFunction(id);
+  int v_reg = mir_graph_->SRegToVReg(s_reg);
+  ::llvm::Value* table_slot = irb_->getInt32(v_reg);
   ::llvm::Value* args[] = { table_slot, val };
-  cu->irb->CreateCall(func, args);
+  irb_->CreateCall(func, args);
 }
 
 // Replace the placeholder value with the real definition
-static void DefineValueOnly(CompilationUnit* cu, ::llvm::Value* val, int s_reg)
+void MirConverter::DefineValueOnly(::llvm::Value* val, int s_reg)
 {
-  ::llvm::Value* placeholder = GetLLVMValue(cu, s_reg);
+  ::llvm::Value* placeholder = GetLLVMValue(s_reg);
   if (placeholder == NULL) {
     // This can happen on instruction rewrite on verification failure
     LOG(WARNING) << "Null placeholder";
@@ -76,149 +74,148 @@
   }
   placeholder->replaceAllUsesWith(val);
   val->takeName(placeholder);
-  cu->llvm_values.elem_list[s_reg] = reinterpret_cast<uintptr_t>(val);
+  llvm_values_.elem_list[s_reg] = reinterpret_cast<uintptr_t>(val);
   ::llvm::Instruction* inst = ::llvm::dyn_cast< ::llvm::Instruction>(placeholder);
   DCHECK(inst != NULL);
   inst->eraseFromParent();
 
 }
 
-static void DefineValue(CompilationUnit* cu, ::llvm::Value* val, int s_reg)
+void MirConverter::DefineValue(::llvm::Value* val, int s_reg)
 {
-  DefineValueOnly(cu, val, s_reg);
-  SetVregOnValue(cu, val, s_reg);
+  DefineValueOnly(val, s_reg);
+  SetVregOnValue(val, s_reg);
 }
 
-static ::llvm::Type* LlvmTypeFromLocRec(CompilationUnit* cu, RegLocation loc)
+::llvm::Type* MirConverter::LlvmTypeFromLocRec(RegLocation loc)
 {
   ::llvm::Type* res = NULL;
   if (loc.wide) {
     if (loc.fp)
-        res = cu->irb->getDoubleTy();
+        res = irb_->getDoubleTy();
     else
-        res = cu->irb->getInt64Ty();
+        res = irb_->getInt64Ty();
   } else {
     if (loc.fp) {
-      res = cu->irb->getFloatTy();
+      res = irb_->getFloatTy();
     } else {
       if (loc.ref)
-        res = cu->irb->getJObjectTy();
+        res = irb_->getJObjectTy();
       else
-        res = cu->irb->getInt32Ty();
+        res = irb_->getInt32Ty();
     }
   }
   return res;
 }
 
-static void InitIR(CompilationUnit* cu)
+void MirConverter::InitIR()
 {
-  LLVMInfo* llvm_info = cu->llvm_info;
-  if (llvm_info == NULL) {
-    CompilerTls* tls = cu->compiler_driver->GetTls();
+  if (llvm_info_ == NULL) {
+    CompilerTls* tls = cu_->compiler_driver->GetTls();
     CHECK(tls != NULL);
-    llvm_info = static_cast<LLVMInfo*>(tls->GetLLVMInfo());
-    if (llvm_info == NULL) {
-      llvm_info = new LLVMInfo();
-      tls->SetLLVMInfo(llvm_info);
+    llvm_info_ = static_cast<LLVMInfo*>(tls->GetLLVMInfo());
+    if (llvm_info_ == NULL) {
+      llvm_info_ = new LLVMInfo();
+      tls->SetLLVMInfo(llvm_info_);
     }
   }
-  cu->context = llvm_info->GetLLVMContext();
-  cu->module = llvm_info->GetLLVMModule();
-  cu->intrinsic_helper = llvm_info->GetIntrinsicHelper();
-  cu->irb = llvm_info->GetIRBuilder();
+  context_ = llvm_info_->GetLLVMContext();
+  module_ = llvm_info_->GetLLVMModule();
+  intrinsic_helper_ = llvm_info_->GetIntrinsicHelper();
+  irb_ = llvm_info_->GetIRBuilder();
 }
 
-::llvm::BasicBlock* FindCaseTarget(CompilationUnit* cu, uint32_t vaddr)
+::llvm::BasicBlock* MirConverter::FindCaseTarget(uint32_t vaddr)
 {
-  BasicBlock* bb = cu->mir_graph.get()->FindBlock(vaddr);
+  BasicBlock* bb = mir_graph_->FindBlock(vaddr);
   DCHECK(bb != NULL);
-  return GetLLVMBlock(cu, bb->id);
+  return GetLLVMBlock(bb->id);
 }
 
-static void ConvertPackedSwitch(CompilationUnit* cu, BasicBlock* bb,
+void MirConverter::ConvertPackedSwitch(BasicBlock* bb,
                                 int32_t table_offset, RegLocation rl_src)
 {
   const Instruction::PackedSwitchPayload* payload =
       reinterpret_cast<const Instruction::PackedSwitchPayload*>(
-      cu->insns + cu->current_dalvik_offset + table_offset);
+      cu_->insns + current_dalvik_offset_ + table_offset);
 
-  ::llvm::Value* value = GetLLVMValue(cu, rl_src.orig_sreg);
+  ::llvm::Value* value = GetLLVMValue(rl_src.orig_sreg);
 
   ::llvm::SwitchInst* sw =
-    cu->irb->CreateSwitch(value, GetLLVMBlock(cu, bb->fall_through->id),
+    irb_->CreateSwitch(value, GetLLVMBlock(bb->fall_through->id),
                              payload->case_count);
 
   for (uint16_t i = 0; i < payload->case_count; ++i) {
     ::llvm::BasicBlock* llvm_bb =
-        FindCaseTarget(cu, cu->current_dalvik_offset + payload->targets[i]);
-    sw->addCase(cu->irb->getInt32(payload->first_key + i), llvm_bb);
+        FindCaseTarget(current_dalvik_offset_ + payload->targets[i]);
+    sw->addCase(irb_->getInt32(payload->first_key + i), llvm_bb);
   }
   ::llvm::MDNode* switch_node =
-      ::llvm::MDNode::get(*cu->context, cu->irb->getInt32(table_offset));
+      ::llvm::MDNode::get(*context_, irb_->getInt32(table_offset));
   sw->setMetadata("SwitchTable", switch_node);
   bb->taken = NULL;
   bb->fall_through = NULL;
 }
 
-static void ConvertSparseSwitch(CompilationUnit* cu, BasicBlock* bb,
+void MirConverter::ConvertSparseSwitch(BasicBlock* bb,
                                 int32_t table_offset, RegLocation rl_src)
 {
   const Instruction::SparseSwitchPayload* payload =
       reinterpret_cast<const Instruction::SparseSwitchPayload*>(
-      cu->insns + cu->current_dalvik_offset + table_offset);
+      cu_->insns + current_dalvik_offset_ + table_offset);
 
   const int32_t* keys = payload->GetKeys();
   const int32_t* targets = payload->GetTargets();
 
-  ::llvm::Value* value = GetLLVMValue(cu, rl_src.orig_sreg);
+  ::llvm::Value* value = GetLLVMValue(rl_src.orig_sreg);
 
   ::llvm::SwitchInst* sw =
-    cu->irb->CreateSwitch(value, GetLLVMBlock(cu, bb->fall_through->id),
+    irb_->CreateSwitch(value, GetLLVMBlock(bb->fall_through->id),
                              payload->case_count);
 
   for (size_t i = 0; i < payload->case_count; ++i) {
     ::llvm::BasicBlock* llvm_bb =
-        FindCaseTarget(cu, cu->current_dalvik_offset + targets[i]);
-    sw->addCase(cu->irb->getInt32(keys[i]), llvm_bb);
+        FindCaseTarget(current_dalvik_offset_ + targets[i]);
+    sw->addCase(irb_->getInt32(keys[i]), llvm_bb);
   }
   ::llvm::MDNode* switch_node =
-      ::llvm::MDNode::get(*cu->context, cu->irb->getInt32(table_offset));
+      ::llvm::MDNode::get(*context_, irb_->getInt32(table_offset));
   sw->setMetadata("SwitchTable", switch_node);
   bb->taken = NULL;
   bb->fall_through = NULL;
 }
 
-static void ConvertSget(CompilationUnit* cu, int32_t field_index,
+void MirConverter::ConvertSget(int32_t field_index,
                         art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_dest)
 {
-  ::llvm::Constant* field_idx = cu->irb->getInt32(field_index);
-  ::llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
-  ::llvm::Value* res = cu->irb->CreateCall(intr, field_idx);
-  DefineValue(cu, res, rl_dest.orig_sreg);
+  ::llvm::Constant* field_idx = irb_->getInt32(field_index);
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  ::llvm::Value* res = irb_->CreateCall(intr, field_idx);
+  DefineValue(res, rl_dest.orig_sreg);
 }
 
-static void ConvertSput(CompilationUnit* cu, int32_t field_index,
+void MirConverter::ConvertSput(int32_t field_index,
                         art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_src)
 {
   ::llvm::SmallVector< ::llvm::Value*, 2> args;
-  args.push_back(cu->irb->getInt32(field_index));
-  args.push_back(GetLLVMValue(cu, rl_src.orig_sreg));
-  ::llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
-  cu->irb->CreateCall(intr, args);
+  args.push_back(irb_->getInt32(field_index));
+  args.push_back(GetLLVMValue(rl_src.orig_sreg));
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  irb_->CreateCall(intr, args);
 }
 
-static void ConvertFillArrayData(CompilationUnit* cu, int32_t offset, RegLocation rl_array)
+void MirConverter::ConvertFillArrayData(int32_t offset, RegLocation rl_array)
 {
   art::llvm::IntrinsicHelper::IntrinsicId id;
   id = art::llvm::IntrinsicHelper::HLFillArrayData;
   ::llvm::SmallVector< ::llvm::Value*, 2> args;
-  args.push_back(cu->irb->getInt32(offset));
-  args.push_back(GetLLVMValue(cu, rl_array.orig_sreg));
-  ::llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
-  cu->irb->CreateCall(intr, args);
+  args.push_back(irb_->getInt32(offset));
+  args.push_back(GetLLVMValue(rl_array.orig_sreg));
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  irb_->CreateCall(intr, args);
 }
 
-static ::llvm::Value* EmitConst(CompilationUnit* cu, ::llvm::ArrayRef< ::llvm::Value*> src,
+::llvm::Value* MirConverter::EmitConst(::llvm::ArrayRef< ::llvm::Value*> src,
                               RegLocation loc)
 {
   art::llvm::IntrinsicHelper::IntrinsicId id;
@@ -237,18 +234,18 @@
       id = art::llvm::IntrinsicHelper::ConstInt;
     }
   }
-  ::llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
-  return cu->irb->CreateCall(intr, src);
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  return irb_->CreateCall(intr, src);
 }
 
-static void EmitPopShadowFrame(CompilationUnit* cu)
+void MirConverter::EmitPopShadowFrame()
 {
-  ::llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(
       art::llvm::IntrinsicHelper::PopShadowFrame);
-  cu->irb->CreateCall(intr);
+  irb_->CreateCall(intr);
 }
 
-static ::llvm::Value* EmitCopy(CompilationUnit* cu, ::llvm::ArrayRef< ::llvm::Value*> src,
+::llvm::Value* MirConverter::EmitCopy(::llvm::ArrayRef< ::llvm::Value*> src,
                              RegLocation loc)
 {
   art::llvm::IntrinsicHelper::IntrinsicId id;
@@ -267,111 +264,111 @@
       id = art::llvm::IntrinsicHelper::CopyInt;
     }
   }
-  ::llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
-  return cu->irb->CreateCall(intr, src);
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  return irb_->CreateCall(intr, src);
 }
 
-static void ConvertMoveException(CompilationUnit* cu, RegLocation rl_dest)
+void MirConverter::ConvertMoveException(RegLocation rl_dest)
 {
-  ::llvm::Function* func = cu->intrinsic_helper->GetIntrinsicFunction(
+  ::llvm::Function* func = intrinsic_helper_->GetIntrinsicFunction(
       art::llvm::IntrinsicHelper::GetException);
-  ::llvm::Value* res = cu->irb->CreateCall(func);
-  DefineValue(cu, res, rl_dest.orig_sreg);
+  ::llvm::Value* res = irb_->CreateCall(func);
+  DefineValue(res, rl_dest.orig_sreg);
 }
 
-static void ConvertThrow(CompilationUnit* cu, RegLocation rl_src)
+void MirConverter::ConvertThrow(RegLocation rl_src)
 {
-  ::llvm::Value* src = GetLLVMValue(cu, rl_src.orig_sreg);
-  ::llvm::Function* func = cu->intrinsic_helper->GetIntrinsicFunction(
+  ::llvm::Value* src = GetLLVMValue(rl_src.orig_sreg);
+  ::llvm::Function* func = intrinsic_helper_->GetIntrinsicFunction(
       art::llvm::IntrinsicHelper::HLThrowException);
-  cu->irb->CreateCall(func, src);
+  irb_->CreateCall(func, src);
 }
 
-static void ConvertMonitorEnterExit(CompilationUnit* cu, int opt_flags,
+void MirConverter::ConvertMonitorEnterExit(int opt_flags,
                                     art::llvm::IntrinsicHelper::IntrinsicId id,
                                     RegLocation rl_src)
 {
   ::llvm::SmallVector< ::llvm::Value*, 2> args;
-  args.push_back(cu->irb->getInt32(opt_flags));
-  args.push_back(GetLLVMValue(cu, rl_src.orig_sreg));
-  ::llvm::Function* func = cu->intrinsic_helper->GetIntrinsicFunction(id);
-  cu->irb->CreateCall(func, args);
+  args.push_back(irb_->getInt32(opt_flags));
+  args.push_back(GetLLVMValue(rl_src.orig_sreg));
+  ::llvm::Function* func = intrinsic_helper_->GetIntrinsicFunction(id);
+  irb_->CreateCall(func, args);
 }
 
-static void ConvertArrayLength(CompilationUnit* cu, int opt_flags,
+void MirConverter::ConvertArrayLength(int opt_flags,
                                RegLocation rl_dest, RegLocation rl_src)
 {
   ::llvm::SmallVector< ::llvm::Value*, 2> args;
-  args.push_back(cu->irb->getInt32(opt_flags));
-  args.push_back(GetLLVMValue(cu, rl_src.orig_sreg));
-  ::llvm::Function* func = cu->intrinsic_helper->GetIntrinsicFunction(
+  args.push_back(irb_->getInt32(opt_flags));
+  args.push_back(GetLLVMValue(rl_src.orig_sreg));
+  ::llvm::Function* func = intrinsic_helper_->GetIntrinsicFunction(
       art::llvm::IntrinsicHelper::OptArrayLength);
-  ::llvm::Value* res = cu->irb->CreateCall(func, args);
-  DefineValue(cu, res, rl_dest.orig_sreg);
+  ::llvm::Value* res = irb_->CreateCall(func, args);
+  DefineValue(res, rl_dest.orig_sreg);
 }
 
-static void EmitSuspendCheck(CompilationUnit* cu)
+void MirConverter::EmitSuspendCheck()
 {
   art::llvm::IntrinsicHelper::IntrinsicId id =
       art::llvm::IntrinsicHelper::CheckSuspend;
-  ::llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
-  cu->irb->CreateCall(intr);
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  irb_->CreateCall(intr);
 }
 
-static ::llvm::Value* ConvertCompare(CompilationUnit* cu, ConditionCode cc,
+::llvm::Value* MirConverter::ConvertCompare(ConditionCode cc,
                                    ::llvm::Value* src1, ::llvm::Value* src2)
 {
   ::llvm::Value* res = NULL;
   DCHECK_EQ(src1->getType(), src2->getType());
   switch(cc) {
-    case kCondEq: res = cu->irb->CreateICmpEQ(src1, src2); break;
-    case kCondNe: res = cu->irb->CreateICmpNE(src1, src2); break;
-    case kCondLt: res = cu->irb->CreateICmpSLT(src1, src2); break;
-    case kCondGe: res = cu->irb->CreateICmpSGE(src1, src2); break;
-    case kCondGt: res = cu->irb->CreateICmpSGT(src1, src2); break;
-    case kCondLe: res = cu->irb->CreateICmpSLE(src1, src2); break;
+    case kCondEq: res = irb_->CreateICmpEQ(src1, src2); break;
+    case kCondNe: res = irb_->CreateICmpNE(src1, src2); break;
+    case kCondLt: res = irb_->CreateICmpSLT(src1, src2); break;
+    case kCondGe: res = irb_->CreateICmpSGE(src1, src2); break;
+    case kCondGt: res = irb_->CreateICmpSGT(src1, src2); break;
+    case kCondLe: res = irb_->CreateICmpSLE(src1, src2); break;
     default: LOG(FATAL) << "Unexpected cc value " << cc;
   }
   return res;
 }
 
-static void ConvertCompareAndBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+void MirConverter::ConvertCompareAndBranch(BasicBlock* bb, MIR* mir,
                                     ConditionCode cc, RegLocation rl_src1, RegLocation rl_src2)
 {
   if (bb->taken->start_offset <= mir->offset) {
-    EmitSuspendCheck(cu);
+    EmitSuspendCheck();
   }
-  ::llvm::Value* src1 = GetLLVMValue(cu, rl_src1.orig_sreg);
-  ::llvm::Value* src2 = GetLLVMValue(cu, rl_src2.orig_sreg);
-  ::llvm::Value* cond_value = ConvertCompare(cu, cc, src1, src2);
-  cond_value->setName(StringPrintf("t%d", cu->temp_name++));
-  cu->irb->CreateCondBr(cond_value, GetLLVMBlock(cu, bb->taken->id),
-                           GetLLVMBlock(cu, bb->fall_through->id));
+  ::llvm::Value* src1 = GetLLVMValue(rl_src1.orig_sreg);
+  ::llvm::Value* src2 = GetLLVMValue(rl_src2.orig_sreg);
+  ::llvm::Value* cond_value = ConvertCompare(cc, src1, src2);
+  cond_value->setName(StringPrintf("t%d", temp_name_++));
+  irb_->CreateCondBr(cond_value, GetLLVMBlock(bb->taken->id),
+                           GetLLVMBlock(bb->fall_through->id));
   // Don't redo the fallthrough branch in the BB driver
   bb->fall_through = NULL;
 }
 
-static void ConvertCompareZeroAndBranch(CompilationUnit* cu, BasicBlock* bb,
+void MirConverter::ConvertCompareZeroAndBranch(BasicBlock* bb,
                                         MIR* mir, ConditionCode cc, RegLocation rl_src1)
 {
   if (bb->taken->start_offset <= mir->offset) {
-    EmitSuspendCheck(cu);
+    EmitSuspendCheck();
   }
-  ::llvm::Value* src1 = GetLLVMValue(cu, rl_src1.orig_sreg);
+  ::llvm::Value* src1 = GetLLVMValue(rl_src1.orig_sreg);
   ::llvm::Value* src2;
   if (rl_src1.ref) {
-    src2 = cu->irb->getJNull();
+    src2 = irb_->getJNull();
   } else {
-    src2 = cu->irb->getInt32(0);
+    src2 = irb_->getInt32(0);
   }
-  ::llvm::Value* cond_value = ConvertCompare(cu, cc, src1, src2);
-  cu->irb->CreateCondBr(cond_value, GetLLVMBlock(cu, bb->taken->id),
-                           GetLLVMBlock(cu, bb->fall_through->id));
+  ::llvm::Value* cond_value = ConvertCompare(cc, src1, src2);
+  irb_->CreateCondBr(cond_value, GetLLVMBlock(bb->taken->id),
+                           GetLLVMBlock(bb->fall_through->id));
   // Don't redo the fallthrough branch in the BB driver
   bb->fall_through = NULL;
 }
 
-static ::llvm::Value* GenDivModOp(CompilationUnit* cu, bool is_div, bool is_long,
+::llvm::Value* MirConverter::GenDivModOp(bool is_div, bool is_long,
                                 ::llvm::Value* src1, ::llvm::Value* src2)
 {
   art::llvm::IntrinsicHelper::IntrinsicId id;
@@ -388,93 +385,93 @@
       id = art::llvm::IntrinsicHelper::RemInt;
     }
   }
-  ::llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
   ::llvm::SmallVector< ::llvm::Value*, 2>args;
   args.push_back(src1);
   args.push_back(src2);
-  return cu->irb->CreateCall(intr, args);
+  return irb_->CreateCall(intr, args);
 }
 
-static ::llvm::Value* GenArithOp(CompilationUnit* cu, OpKind op, bool is_long,
+::llvm::Value* MirConverter::GenArithOp(OpKind op, bool is_long,
                                ::llvm::Value* src1, ::llvm::Value* src2)
 {
   ::llvm::Value* res = NULL;
   switch(op) {
-    case kOpAdd: res = cu->irb->CreateAdd(src1, src2); break;
-    case kOpSub: res = cu->irb->CreateSub(src1, src2); break;
-    case kOpRsub: res = cu->irb->CreateSub(src2, src1); break;
-    case kOpMul: res = cu->irb->CreateMul(src1, src2); break;
-    case kOpOr: res = cu->irb->CreateOr(src1, src2); break;
-    case kOpAnd: res = cu->irb->CreateAnd(src1, src2); break;
-    case kOpXor: res = cu->irb->CreateXor(src1, src2); break;
-    case kOpDiv: res = GenDivModOp(cu, true, is_long, src1, src2); break;
-    case kOpRem: res = GenDivModOp(cu, false, is_long, src1, src2); break;
-    case kOpLsl: res = cu->irb->CreateShl(src1, src2); break;
-    case kOpLsr: res = cu->irb->CreateLShr(src1, src2); break;
-    case kOpAsr: res = cu->irb->CreateAShr(src1, src2); break;
+    case kOpAdd: res = irb_->CreateAdd(src1, src2); break;
+    case kOpSub: res = irb_->CreateSub(src1, src2); break;
+    case kOpRsub: res = irb_->CreateSub(src2, src1); break;
+    case kOpMul: res = irb_->CreateMul(src1, src2); break;
+    case kOpOr: res = irb_->CreateOr(src1, src2); break;
+    case kOpAnd: res = irb_->CreateAnd(src1, src2); break;
+    case kOpXor: res = irb_->CreateXor(src1, src2); break;
+    case kOpDiv: res = GenDivModOp(true, is_long, src1, src2); break;
+    case kOpRem: res = GenDivModOp(false, is_long, src1, src2); break;
+    case kOpLsl: res = irb_->CreateShl(src1, src2); break;
+    case kOpLsr: res = irb_->CreateLShr(src1, src2); break;
+    case kOpAsr: res = irb_->CreateAShr(src1, src2); break;
     default:
       LOG(FATAL) << "Invalid op " << op;
   }
   return res;
 }
 
-static void ConvertFPArithOp(CompilationUnit* cu, OpKind op, RegLocation rl_dest,
+void MirConverter::ConvertFPArithOp(OpKind op, RegLocation rl_dest,
                              RegLocation rl_src1, RegLocation rl_src2)
 {
-  ::llvm::Value* src1 = GetLLVMValue(cu, rl_src1.orig_sreg);
-  ::llvm::Value* src2 = GetLLVMValue(cu, rl_src2.orig_sreg);
+  ::llvm::Value* src1 = GetLLVMValue(rl_src1.orig_sreg);
+  ::llvm::Value* src2 = GetLLVMValue(rl_src2.orig_sreg);
   ::llvm::Value* res = NULL;
   switch(op) {
-    case kOpAdd: res = cu->irb->CreateFAdd(src1, src2); break;
-    case kOpSub: res = cu->irb->CreateFSub(src1, src2); break;
-    case kOpMul: res = cu->irb->CreateFMul(src1, src2); break;
-    case kOpDiv: res = cu->irb->CreateFDiv(src1, src2); break;
-    case kOpRem: res = cu->irb->CreateFRem(src1, src2); break;
+    case kOpAdd: res = irb_->CreateFAdd(src1, src2); break;
+    case kOpSub: res = irb_->CreateFSub(src1, src2); break;
+    case kOpMul: res = irb_->CreateFMul(src1, src2); break;
+    case kOpDiv: res = irb_->CreateFDiv(src1, src2); break;
+    case kOpRem: res = irb_->CreateFRem(src1, src2); break;
     default:
       LOG(FATAL) << "Invalid op " << op;
   }
-  DefineValue(cu, res, rl_dest.orig_sreg);
+  DefineValue(res, rl_dest.orig_sreg);
 }
 
-static void ConvertShift(CompilationUnit* cu, art::llvm::IntrinsicHelper::IntrinsicId id,
+void MirConverter::ConvertShift(art::llvm::IntrinsicHelper::IntrinsicId id,
                          RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
 {
-  ::llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
   ::llvm::SmallVector< ::llvm::Value*, 2>args;
-  args.push_back(GetLLVMValue(cu, rl_src1.orig_sreg));
-  args.push_back(GetLLVMValue(cu, rl_src2.orig_sreg));
-  ::llvm::Value* res = cu->irb->CreateCall(intr, args);
-  DefineValue(cu, res, rl_dest.orig_sreg);
+  args.push_back(GetLLVMValue(rl_src1.orig_sreg));
+  args.push_back(GetLLVMValue(rl_src2.orig_sreg));
+  ::llvm::Value* res = irb_->CreateCall(intr, args);
+  DefineValue(res, rl_dest.orig_sreg);
 }
 
-static void ConvertShiftLit(CompilationUnit* cu, art::llvm::IntrinsicHelper::IntrinsicId id,
+void MirConverter::ConvertShiftLit(art::llvm::IntrinsicHelper::IntrinsicId id,
                             RegLocation rl_dest, RegLocation rl_src, int shift_amount)
 {
-  ::llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
   ::llvm::SmallVector< ::llvm::Value*, 2>args;
-  args.push_back(GetLLVMValue(cu, rl_src.orig_sreg));
-  args.push_back(cu->irb->getInt32(shift_amount));
-  ::llvm::Value* res = cu->irb->CreateCall(intr, args);
-  DefineValue(cu, res, rl_dest.orig_sreg);
+  args.push_back(GetLLVMValue(rl_src.orig_sreg));
+  args.push_back(irb_->getInt32(shift_amount));
+  ::llvm::Value* res = irb_->CreateCall(intr, args);
+  DefineValue(res, rl_dest.orig_sreg);
 }
 
-static void ConvertArithOp(CompilationUnit* cu, OpKind op, RegLocation rl_dest,
+void MirConverter::ConvertArithOp(OpKind op, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2)
 {
-  ::llvm::Value* src1 = GetLLVMValue(cu, rl_src1.orig_sreg);
-  ::llvm::Value* src2 = GetLLVMValue(cu, rl_src2.orig_sreg);
+  ::llvm::Value* src1 = GetLLVMValue(rl_src1.orig_sreg);
+  ::llvm::Value* src2 = GetLLVMValue(rl_src2.orig_sreg);
   DCHECK_EQ(src1->getType(), src2->getType());
-  ::llvm::Value* res = GenArithOp(cu, op, rl_dest.wide, src1, src2);
-  DefineValue(cu, res, rl_dest.orig_sreg);
+  ::llvm::Value* res = GenArithOp(op, rl_dest.wide, src1, src2);
+  DefineValue(res, rl_dest.orig_sreg);
 }
 
-static void ConvertArithOpLit(CompilationUnit* cu, OpKind op, RegLocation rl_dest,
+void MirConverter::ConvertArithOpLit(OpKind op, RegLocation rl_dest,
                               RegLocation rl_src1, int32_t imm)
 {
-  ::llvm::Value* src1 = GetLLVMValue(cu, rl_src1.orig_sreg);
-  ::llvm::Value* src2 = cu->irb->getInt32(imm);
-  ::llvm::Value* res = GenArithOp(cu, op, rl_dest.wide, src1, src2);
-  DefineValue(cu, res, rl_dest.orig_sreg);
+  ::llvm::Value* src1 = GetLLVMValue(rl_src1.orig_sreg);
+  ::llvm::Value* src2 = irb_->getInt32(imm);
+  ::llvm::Value* res = GenArithOp(op, rl_dest.wide, src1, src2);
+  DefineValue(res, rl_dest.orig_sreg);
 }
 
 /*
@@ -482,21 +479,20 @@
  * collect and process arguments for NEW_FILLED_ARRAY and NEW_FILLED_ARRAY_RANGE.
  * The requirements are similar.
  */
-static void ConvertInvoke(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+void MirConverter::ConvertInvoke(BasicBlock* bb, MIR* mir,
                           InvokeType invoke_type, bool is_range, bool is_filled_new_array)
 {
-  Codegen* cg = cu->cg.get();
-  CallInfo* info = cg->NewMemCallInfo(cu, bb, mir, invoke_type, is_range);
+  CallInfo* info = mir_graph_->NewMemCallInfo(bb, mir, invoke_type, is_range);
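+  // Illustrative: both invokes and FilledNewArray funnel their vreg arguments through
+  // the CallInfo built here; wide arguments consume two words each (see loop below).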
   ::llvm::SmallVector< ::llvm::Value*, 10> args;
   // Insert the invoke_type
-  args.push_back(cu->irb->getInt32(static_cast<int>(invoke_type)));
+  args.push_back(irb_->getInt32(static_cast<int>(invoke_type)));
   // Insert the method_idx
-  args.push_back(cu->irb->getInt32(info->index));
+  args.push_back(irb_->getInt32(info->index));
   // Insert the optimization flags
-  args.push_back(cu->irb->getInt32(info->opt_flags));
+  args.push_back(irb_->getInt32(info->opt_flags));
   // Now, insert the actual arguments
   for (int i = 0; i < info->num_arg_words;) {
-    ::llvm::Value* val = GetLLVMValue(cu, info->args[i].orig_sreg);
+    ::llvm::Value* val = GetLLVMValue(info->args[i].orig_sreg);
     args.push_back(val);
     i += info->args[i].wide ? 2 : 1;
   }
@@ -525,215 +521,214 @@
         id = art::llvm::IntrinsicHelper::HLInvokeInt;
     }
   }
-  ::llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
-  ::llvm::Value* res = cu->irb->CreateCall(intr, args);
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  ::llvm::Value* res = irb_->CreateCall(intr, args);
   if (info->result.location != kLocInvalid) {
-    DefineValue(cu, res, info->result.orig_sreg);
+    DefineValue(res, info->result.orig_sreg);
   }
 }
 
-static void ConvertConstObject(CompilationUnit* cu, uint32_t idx,
+void MirConverter::ConvertConstObject(uint32_t idx,
                                art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_dest)
 {
-  ::llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
-  ::llvm::Value* index = cu->irb->getInt32(idx);
-  ::llvm::Value* res = cu->irb->CreateCall(intr, index);
-  DefineValue(cu, res, rl_dest.orig_sreg);
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  ::llvm::Value* index = irb_->getInt32(idx);
+  ::llvm::Value* res = irb_->CreateCall(intr, index);
+  DefineValue(res, rl_dest.orig_sreg);
 }
 
-static void ConvertCheckCast(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_src)
+void MirConverter::ConvertCheckCast(uint32_t type_idx, RegLocation rl_src)
 {
   art::llvm::IntrinsicHelper::IntrinsicId id;
   id = art::llvm::IntrinsicHelper::HLCheckCast;
-  ::llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
   ::llvm::SmallVector< ::llvm::Value*, 2> args;
-  args.push_back(cu->irb->getInt32(type_idx));
-  args.push_back(GetLLVMValue(cu, rl_src.orig_sreg));
-  cu->irb->CreateCall(intr, args);
+  args.push_back(irb_->getInt32(type_idx));
+  args.push_back(GetLLVMValue(rl_src.orig_sreg));
+  irb_->CreateCall(intr, args);
 }
 
-static void ConvertNewInstance(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest)
+void MirConverter::ConvertNewInstance(uint32_t type_idx, RegLocation rl_dest)
 {
   art::llvm::IntrinsicHelper::IntrinsicId id;
   id = art::llvm::IntrinsicHelper::NewInstance;
-  ::llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
-  ::llvm::Value* index = cu->irb->getInt32(type_idx);
-  ::llvm::Value* res = cu->irb->CreateCall(intr, index);
-  DefineValue(cu, res, rl_dest.orig_sreg);
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  ::llvm::Value* index = irb_->getInt32(type_idx);
+  ::llvm::Value* res = irb_->CreateCall(intr, index);
+  DefineValue(res, rl_dest.orig_sreg);
 }
 
-static void ConvertNewArray(CompilationUnit* cu, uint32_t type_idx,
+void MirConverter::ConvertNewArray(uint32_t type_idx,
                             RegLocation rl_dest, RegLocation rl_src)
 {
   art::llvm::IntrinsicHelper::IntrinsicId id;
   id = art::llvm::IntrinsicHelper::NewArray;
-  ::llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
   ::llvm::SmallVector< ::llvm::Value*, 2> args;
-  args.push_back(cu->irb->getInt32(type_idx));
-  args.push_back(GetLLVMValue(cu, rl_src.orig_sreg));
-  ::llvm::Value* res = cu->irb->CreateCall(intr, args);
-  DefineValue(cu, res, rl_dest.orig_sreg);
+  args.push_back(irb_->getInt32(type_idx));
+  args.push_back(GetLLVMValue(rl_src.orig_sreg));
+  ::llvm::Value* res = irb_->CreateCall(intr, args);
+  DefineValue(res, rl_dest.orig_sreg);
 }
 
-static void ConvertAget(CompilationUnit* cu, int opt_flags,
+void MirConverter::ConvertAget(int opt_flags,
                         art::llvm::IntrinsicHelper::IntrinsicId id,
                         RegLocation rl_dest, RegLocation rl_array, RegLocation rl_index)
 {
   ::llvm::SmallVector< ::llvm::Value*, 3> args;
-  args.push_back(cu->irb->getInt32(opt_flags));
-  args.push_back(GetLLVMValue(cu, rl_array.orig_sreg));
-  args.push_back(GetLLVMValue(cu, rl_index.orig_sreg));
-  ::llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
-  ::llvm::Value* res = cu->irb->CreateCall(intr, args);
-  DefineValue(cu, res, rl_dest.orig_sreg);
+  args.push_back(irb_->getInt32(opt_flags));
+  args.push_back(GetLLVMValue(rl_array.orig_sreg));
+  args.push_back(GetLLVMValue(rl_index.orig_sreg));
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  ::llvm::Value* res = irb_->CreateCall(intr, args);
+  DefineValue(res, rl_dest.orig_sreg);
 }
 
-static void ConvertAput(CompilationUnit* cu, int opt_flags,
+void MirConverter::ConvertAput(int opt_flags,
                         art::llvm::IntrinsicHelper::IntrinsicId id,
                         RegLocation rl_src, RegLocation rl_array, RegLocation rl_index)
 {
   ::llvm::SmallVector< ::llvm::Value*, 4> args;
-  args.push_back(cu->irb->getInt32(opt_flags));
-  args.push_back(GetLLVMValue(cu, rl_src.orig_sreg));
-  args.push_back(GetLLVMValue(cu, rl_array.orig_sreg));
-  args.push_back(GetLLVMValue(cu, rl_index.orig_sreg));
-  ::llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
-  cu->irb->CreateCall(intr, args);
+  args.push_back(irb_->getInt32(opt_flags));
+  args.push_back(GetLLVMValue(rl_src.orig_sreg));
+  args.push_back(GetLLVMValue(rl_array.orig_sreg));
+  args.push_back(GetLLVMValue(rl_index.orig_sreg));
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  irb_->CreateCall(intr, args);
 }
 
-static void ConvertIget(CompilationUnit* cu, int opt_flags,
+void MirConverter::ConvertIget(int opt_flags,
                         art::llvm::IntrinsicHelper::IntrinsicId id,
                         RegLocation rl_dest, RegLocation rl_obj, int field_index)
 {
   ::llvm::SmallVector< ::llvm::Value*, 3> args;
-  args.push_back(cu->irb->getInt32(opt_flags));
-  args.push_back(GetLLVMValue(cu, rl_obj.orig_sreg));
-  args.push_back(cu->irb->getInt32(field_index));
-  ::llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
-  ::llvm::Value* res = cu->irb->CreateCall(intr, args);
-  DefineValue(cu, res, rl_dest.orig_sreg);
+  args.push_back(irb_->getInt32(opt_flags));
+  args.push_back(GetLLVMValue(rl_obj.orig_sreg));
+  args.push_back(irb_->getInt32(field_index));
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  ::llvm::Value* res = irb_->CreateCall(intr, args);
+  DefineValue(res, rl_dest.orig_sreg);
 }
 
-static void ConvertIput(CompilationUnit* cu, int opt_flags,
+void MirConverter::ConvertIput(int opt_flags,
                         art::llvm::IntrinsicHelper::IntrinsicId id,
                         RegLocation rl_src, RegLocation rl_obj, int field_index)
 {
   ::llvm::SmallVector< ::llvm::Value*, 4> args;
-  args.push_back(cu->irb->getInt32(opt_flags));
-  args.push_back(GetLLVMValue(cu, rl_src.orig_sreg));
-  args.push_back(GetLLVMValue(cu, rl_obj.orig_sreg));
-  args.push_back(cu->irb->getInt32(field_index));
-  ::llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
-  cu->irb->CreateCall(intr, args);
+  args.push_back(irb_->getInt32(opt_flags));
+  args.push_back(GetLLVMValue(rl_src.orig_sreg));
+  args.push_back(GetLLVMValue(rl_obj.orig_sreg));
+  args.push_back(irb_->getInt32(field_index));
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  irb_->CreateCall(intr, args);
 }
 
-static void ConvertInstanceOf(CompilationUnit* cu, uint32_t type_idx,
+void MirConverter::ConvertInstanceOf(uint32_t type_idx,
                               RegLocation rl_dest, RegLocation rl_src)
 {
   art::llvm::IntrinsicHelper::IntrinsicId id;
   id = art::llvm::IntrinsicHelper::InstanceOf;
-  ::llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
   ::llvm::SmallVector< ::llvm::Value*, 2> args;
-  args.push_back(cu->irb->getInt32(type_idx));
-  args.push_back(GetLLVMValue(cu, rl_src.orig_sreg));
-  ::llvm::Value* res = cu->irb->CreateCall(intr, args);
-  DefineValue(cu, res, rl_dest.orig_sreg);
+  args.push_back(irb_->getInt32(type_idx));
+  args.push_back(GetLLVMValue(rl_src.orig_sreg));
+  ::llvm::Value* res = irb_->CreateCall(intr, args);
+  DefineValue(res, rl_dest.orig_sreg);
 }
 
-static void ConvertIntToLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+void MirConverter::ConvertIntToLong(RegLocation rl_dest, RegLocation rl_src)
 {
-  ::llvm::Value* res = cu->irb->CreateSExt(GetLLVMValue(cu, rl_src.orig_sreg),
-                                            cu->irb->getInt64Ty());
-  DefineValue(cu, res, rl_dest.orig_sreg);
+  ::llvm::Value* res = irb_->CreateSExt(GetLLVMValue(rl_src.orig_sreg),
+                                            irb_->getInt64Ty());
+  DefineValue(res, rl_dest.orig_sreg);
 }
 
-static void ConvertLongToInt(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+void MirConverter::ConvertLongToInt(RegLocation rl_dest, RegLocation rl_src)
 {
-  ::llvm::Value* src = GetLLVMValue(cu, rl_src.orig_sreg);
-  ::llvm::Value* res = cu->irb->CreateTrunc(src, cu->irb->getInt32Ty());
-  DefineValue(cu, res, rl_dest.orig_sreg);
+  ::llvm::Value* src = GetLLVMValue(rl_src.orig_sreg);
+  ::llvm::Value* res = irb_->CreateTrunc(src, irb_->getInt32Ty());
+  DefineValue(res, rl_dest.orig_sreg);
 }
 
-static void ConvertFloatToDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+void MirConverter::ConvertFloatToDouble(RegLocation rl_dest, RegLocation rl_src)
 {
-  ::llvm::Value* src = GetLLVMValue(cu, rl_src.orig_sreg);
-  ::llvm::Value* res = cu->irb->CreateFPExt(src, cu->irb->getDoubleTy());
-  DefineValue(cu, res, rl_dest.orig_sreg);
+  ::llvm::Value* src = GetLLVMValue(rl_src.orig_sreg);
+  ::llvm::Value* res = irb_->CreateFPExt(src, irb_->getDoubleTy());
+  DefineValue(res, rl_dest.orig_sreg);
 }
 
-static void ConvertDoubleToFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+void MirConverter::ConvertDoubleToFloat(RegLocation rl_dest, RegLocation rl_src)
 {
-  ::llvm::Value* src = GetLLVMValue(cu, rl_src.orig_sreg);
-  ::llvm::Value* res = cu->irb->CreateFPTrunc(src, cu->irb->getFloatTy());
-  DefineValue(cu, res, rl_dest.orig_sreg);
+  ::llvm::Value* src = GetLLVMValue(rl_src.orig_sreg);
+  ::llvm::Value* res = irb_->CreateFPTrunc(src, irb_->getFloatTy());
+  DefineValue(res, rl_dest.orig_sreg);
 }
 
-static void ConvertWideComparison(CompilationUnit* cu,
-                                  art::llvm::IntrinsicHelper::IntrinsicId id,
-                                  RegLocation rl_dest, RegLocation rl_src1,
-                           RegLocation rl_src2)
+void MirConverter::ConvertWideComparison(art::llvm::IntrinsicHelper::IntrinsicId id,
+                                         RegLocation rl_dest, RegLocation rl_src1,
+                                         RegLocation rl_src2)
 {
   DCHECK_EQ(rl_src1.fp, rl_src2.fp);
   DCHECK_EQ(rl_src1.wide, rl_src2.wide);
-  ::llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
   ::llvm::SmallVector< ::llvm::Value*, 2> args;
-  args.push_back(GetLLVMValue(cu, rl_src1.orig_sreg));
-  args.push_back(GetLLVMValue(cu, rl_src2.orig_sreg));
-  ::llvm::Value* res = cu->irb->CreateCall(intr, args);
-  DefineValue(cu, res, rl_dest.orig_sreg);
+  args.push_back(GetLLVMValue(rl_src1.orig_sreg));
+  args.push_back(GetLLVMValue(rl_src2.orig_sreg));
+  ::llvm::Value* res = irb_->CreateCall(intr, args);
+  DefineValue(res, rl_dest.orig_sreg);
 }
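
The CmplFloat/CmpgFloat/CmplDouble/CmpgDouble intrinsics routed through
ConvertWideComparison carry Dalvik's three-way compare semantics, whose only
difference is the NaN bias: the "l" flavor yields -1 when either operand is
NaN, the "g" flavor yields +1. A standalone illustration of those semantics
(not of the intrinsic implementation itself):

    #include <cmath>
    #include <cstdio>

    // Three-way compare with an explicit NaN bias, as in Dalvik's cmpl/cmpg.
    static int Cmp(float a, float b, int nan_result) {
      if (std::isnan(a) || std::isnan(b)) return nan_result;
      return (a > b) - (a < b);  // +1, 0 or -1
    }

    int main() {
      float nan = std::nanf("");
      std::printf("cmpl(1.0, NaN) = %d\n", Cmp(1.0f, nan, -1));  // -1
      std::printf("cmpg(1.0, NaN) = %d\n", Cmp(1.0f, nan, +1));  // +1
      std::printf("cmp(2.0, 1.0)  = %d\n", Cmp(2.0f, 1.0f, 0));  // +1
      return 0;
    }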
 
-static void ConvertIntNarrowing(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src,
+void MirConverter::ConvertIntNarrowing(RegLocation rl_dest, RegLocation rl_src,
-                                art::llvm::IntrinsicHelper::IntrinsicId id)
+                                       art::llvm::IntrinsicHelper::IntrinsicId id)
 {
-  ::llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
   ::llvm::Value* res =
-      cu->irb->CreateCall(intr, GetLLVMValue(cu, rl_src.orig_sreg));
-  DefineValue(cu, res, rl_dest.orig_sreg);
+      irb_->CreateCall(intr, GetLLVMValue(rl_src.orig_sreg));
+  DefineValue(res, rl_dest.orig_sreg);
 }
 
-static void ConvertNeg(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+void MirConverter::ConvertNeg(RegLocation rl_dest, RegLocation rl_src)
 {
-  ::llvm::Value* res = cu->irb->CreateNeg(GetLLVMValue(cu, rl_src.orig_sreg));
-  DefineValue(cu, res, rl_dest.orig_sreg);
+  ::llvm::Value* res = irb_->CreateNeg(GetLLVMValue(rl_src.orig_sreg));
+  DefineValue(res, rl_dest.orig_sreg);
 }
 
-static void ConvertIntToFP(CompilationUnit* cu, ::llvm::Type* ty, RegLocation rl_dest,
+void MirConverter::ConvertIntToFP(::llvm::Type* ty, RegLocation rl_dest,
-                           RegLocation rl_src)
+                                  RegLocation rl_src)
 {
   ::llvm::Value* res =
-      cu->irb->CreateSIToFP(GetLLVMValue(cu, rl_src.orig_sreg), ty);
-  DefineValue(cu, res, rl_dest.orig_sreg);
+      irb_->CreateSIToFP(GetLLVMValue(rl_src.orig_sreg), ty);
+  DefineValue(res, rl_dest.orig_sreg);
 }
 
-static void ConvertFPToInt(CompilationUnit* cu, art::llvm::IntrinsicHelper::IntrinsicId id,
+void MirConverter::ConvertFPToInt(art::llvm::IntrinsicHelper::IntrinsicId id,
-                           RegLocation rl_dest,
-                    RegLocation rl_src)
+                                  RegLocation rl_dest, RegLocation rl_src)
 {
-  ::llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
-  ::llvm::Value* res = cu->irb->CreateCall(intr, GetLLVMValue(cu, rl_src.orig_sreg));
-  DefineValue(cu, res, rl_dest.orig_sreg);
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  ::llvm::Value* res = irb_->CreateCall(intr, GetLLVMValue(rl_src.orig_sreg));
+  DefineValue(res, rl_dest.orig_sreg);
 }
 
 
-static void ConvertNegFP(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+void MirConverter::ConvertNegFP(RegLocation rl_dest, RegLocation rl_src)
 {
   ::llvm::Value* res =
-      cu->irb->CreateFNeg(GetLLVMValue(cu, rl_src.orig_sreg));
-  DefineValue(cu, res, rl_dest.orig_sreg);
+      irb_->CreateFNeg(GetLLVMValue(rl_src.orig_sreg));
+  DefineValue(res, rl_dest.orig_sreg);
 }
 
-static void ConvertNot(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+void MirConverter::ConvertNot(RegLocation rl_dest, RegLocation rl_src)
 {
-  ::llvm::Value* src = GetLLVMValue(cu, rl_src.orig_sreg);
-  ::llvm::Value* res = cu->irb->CreateXor(src, static_cast<uint64_t>(-1));
-  DefineValue(cu, res, rl_dest.orig_sreg);
+  ::llvm::Value* src = GetLLVMValue(rl_src.orig_sreg);
+  ::llvm::Value* res = irb_->CreateXor(src, static_cast<uint64_t>(-1));
+  DefineValue(res, rl_dest.orig_sreg);
 }
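
ConvertNot lowers NOT_INT/NOT_LONG to an XOR with all-ones rather than a
dedicated complement; the uint64_t overload of CreateXor materializes the
constant at the left operand's own width, so the single call covers both the
32- and 64-bit cases. A quick arithmetic check of the identity ~x == x ^ -1:

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t i = 0x0F0F0F0F;
      int64_t j = INT64_C(0x0F0F0F0F0F0F0F0F);
      // XOR with an all-ones mask is bitwise complement at either width.
      std::printf("%d %d\n", ~i == (i ^ -1), ~j == (j ^ INT64_C(-1)));  // 1 1
      return 0;
    }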
 
-static void EmitConstructorBarrier(CompilationUnit* cu) {
-  ::llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(
+void MirConverter::EmitConstructorBarrier() {
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(
       art::llvm::IntrinsicHelper::ConstructorBarrier);
-  cu->irb->CreateCall(intr);
+  irb_->CreateCall(intr);
 }
 
 /*
@@ -741,61 +736,61 @@
  * load/store utilities here, or target-dependent genXX() handlers
  * when necessary.
  */
-static bool ConvertMIRNode(CompilationUnit* cu, MIR* mir, BasicBlock* bb,
+bool MirConverter::ConvertMIRNode(MIR* mir, BasicBlock* bb,
-                           ::llvm::BasicBlock* llvm_bb)
+                                  ::llvm::BasicBlock* llvm_bb)
 {
   bool res = false;   // Assume success
   RegLocation rl_src[3];
-  RegLocation rl_dest = GetBadLoc();
+  RegLocation rl_dest = mir_graph_->GetBadLoc();
   Instruction::Code opcode = mir->dalvikInsn.opcode;
   int op_val = opcode;
   uint32_t vB = mir->dalvikInsn.vB;
   uint32_t vC = mir->dalvikInsn.vC;
   int opt_flags = mir->optimization_flags;
 
-  if (cu->verbose) {
+  if (cu_->verbose) {
     if (op_val < kMirOpFirst) {
       LOG(INFO) << ".. " << Instruction::Name(opcode) << " 0x" << std::hex << op_val;
     } else {
-      LOG(INFO) << extended_mir_op_names[op_val - kMirOpFirst] << " 0x" << std::hex << op_val;
+      LOG(INFO) << mir_graph_->extended_mir_op_names_[op_val - kMirOpFirst] << " 0x" << std::hex << op_val;
     }
   }
 
   /* Prep Src and Dest locations */
   int next_sreg = 0;
   int next_loc = 0;
-  int attrs = oat_data_flow_attributes[opcode];
-  rl_src[0] = rl_src[1] = rl_src[2] = GetBadLoc();
+  int attrs = mir_graph_->oat_data_flow_attributes_[opcode];
+  rl_src[0] = rl_src[1] = rl_src[2] = mir_graph_->GetBadLoc();
   if (attrs & DF_UA) {
     if (attrs & DF_A_WIDE) {
-      rl_src[next_loc++] = GetSrcWide(cu, mir, next_sreg);
+      rl_src[next_loc++] = mir_graph_->GetSrcWide(mir, next_sreg);
       next_sreg += 2;
     } else {
-      rl_src[next_loc++] = GetSrc(cu, mir, next_sreg);
+      rl_src[next_loc++] = mir_graph_->GetSrc(mir, next_sreg);
       next_sreg++;
     }
   }
   if (attrs & DF_UB) {
     if (attrs & DF_B_WIDE) {
-      rl_src[next_loc++] = GetSrcWide(cu, mir, next_sreg);
+      rl_src[next_loc++] = mir_graph_->GetSrcWide(mir, next_sreg);
       next_sreg += 2;
     } else {
-      rl_src[next_loc++] = GetSrc(cu, mir, next_sreg);
+      rl_src[next_loc++] = mir_graph_->GetSrc(mir, next_sreg);
       next_sreg++;
     }
   }
   if (attrs & DF_UC) {
     if (attrs & DF_C_WIDE) {
-      rl_src[next_loc++] = GetSrcWide(cu, mir, next_sreg);
+      rl_src[next_loc++] = mir_graph_->GetSrcWide(mir, next_sreg);
     } else {
-      rl_src[next_loc++] = GetSrc(cu, mir, next_sreg);
+      rl_src[next_loc++] = mir_graph_->GetSrc(mir, next_sreg);
     }
   }
   if (attrs & DF_DA) {
     if (attrs & DF_A_WIDE) {
-      rl_dest = GetDestWide(cu, mir);
+      rl_dest = mir_graph_->GetDestWide(mir);
     } else {
-      rl_dest = GetDest(cu, mir);
+      rl_dest = mir_graph_->GetDest(mir);
     }
   }
 
@@ -819,18 +814,18 @@
          * Insert a dummy intrinsic copy call, which will be recognized
          * by the quick path and removed by the portable path.
          */
-        ::llvm::Value* src = GetLLVMValue(cu, rl_src[0].orig_sreg);
-        ::llvm::Value* res = EmitCopy(cu, src, rl_dest);
-        DefineValue(cu, res, rl_dest.orig_sreg);
+        ::llvm::Value* src = GetLLVMValue(rl_src[0].orig_sreg);
+        ::llvm::Value* res = EmitCopy(src, rl_dest);
+        DefineValue(res, rl_dest.orig_sreg);
       }
       break;
 
     case Instruction::CONST:
     case Instruction::CONST_4:
     case Instruction::CONST_16: {
-        ::llvm::Constant* imm_value = cu->irb->getJInt(vB);
-        ::llvm::Value* res = EmitConst(cu, imm_value, rl_dest);
-        DefineValue(cu, res, rl_dest.orig_sreg);
+        ::llvm::Constant* imm_value = irb_->getJInt(vB);
+        ::llvm::Value* res = EmitConst(imm_value, rl_dest);
+        DefineValue(res, rl_dest.orig_sreg);
       }
       break;
 
@@ -838,172 +833,172 @@
     case Instruction::CONST_WIDE_32: {
         // Sign extend to 64 bits
         int64_t imm = static_cast<int32_t>(vB);
-        ::llvm::Constant* imm_value = cu->irb->getJLong(imm);
-        ::llvm::Value* res = EmitConst(cu, imm_value, rl_dest);
-        DefineValue(cu, res, rl_dest.orig_sreg);
+        ::llvm::Constant* imm_value = irb_->getJLong(imm);
+        ::llvm::Value* res = EmitConst(imm_value, rl_dest);
+        DefineValue(res, rl_dest.orig_sreg);
       }
       break;
 
     case Instruction::CONST_HIGH16: {
-        ::llvm::Constant* imm_value = cu->irb->getJInt(vB << 16);
-        ::llvm::Value* res = EmitConst(cu, imm_value, rl_dest);
-        DefineValue(cu, res, rl_dest.orig_sreg);
+        ::llvm::Constant* imm_value = irb_->getJInt(vB << 16);
+        ::llvm::Value* res = EmitConst(imm_value, rl_dest);
+        DefineValue(res, rl_dest.orig_sreg);
       }
       break;
 
     case Instruction::CONST_WIDE: {
         ::llvm::Constant* imm_value =
-            cu->irb->getJLong(mir->dalvikInsn.vB_wide);
-        ::llvm::Value* res = EmitConst(cu, imm_value, rl_dest);
-        DefineValue(cu, res, rl_dest.orig_sreg);
+            irb_->getJLong(mir->dalvikInsn.vB_wide);
+        ::llvm::Value* res = EmitConst(imm_value, rl_dest);
+        DefineValue(res, rl_dest.orig_sreg);
       }
       break;
     case Instruction::CONST_WIDE_HIGH16: {
         int64_t imm = static_cast<int64_t>(vB) << 48;
-        ::llvm::Constant* imm_value = cu->irb->getJLong(imm);
-        ::llvm::Value* res = EmitConst(cu, imm_value, rl_dest);
-        DefineValue(cu, res, rl_dest.orig_sreg);
+        ::llvm::Constant* imm_value = irb_->getJLong(imm);
+        ::llvm::Value* res = EmitConst(imm_value, rl_dest);
+        DefineValue(res, rl_dest.orig_sreg);
       }
       break;
 
     case Instruction::SPUT_OBJECT:
-      ConvertSput(cu, vB, art::llvm::IntrinsicHelper::HLSputObject,
+      ConvertSput(vB, art::llvm::IntrinsicHelper::HLSputObject,
                   rl_src[0]);
       break;
     case Instruction::SPUT:
       if (rl_src[0].fp) {
-        ConvertSput(cu, vB, art::llvm::IntrinsicHelper::HLSputFloat,
+        ConvertSput(vB, art::llvm::IntrinsicHelper::HLSputFloat,
                     rl_src[0]);
       } else {
-        ConvertSput(cu, vB, art::llvm::IntrinsicHelper::HLSput, rl_src[0]);
+        ConvertSput(vB, art::llvm::IntrinsicHelper::HLSput, rl_src[0]);
       }
       break;
     case Instruction::SPUT_BOOLEAN:
-      ConvertSput(cu, vB, art::llvm::IntrinsicHelper::HLSputBoolean,
+      ConvertSput(vB, art::llvm::IntrinsicHelper::HLSputBoolean,
                   rl_src[0]);
       break;
     case Instruction::SPUT_BYTE:
-      ConvertSput(cu, vB, art::llvm::IntrinsicHelper::HLSputByte, rl_src[0]);
+      ConvertSput(vB, art::llvm::IntrinsicHelper::HLSputByte, rl_src[0]);
       break;
     case Instruction::SPUT_CHAR:
-      ConvertSput(cu, vB, art::llvm::IntrinsicHelper::HLSputChar, rl_src[0]);
+      ConvertSput(vB, art::llvm::IntrinsicHelper::HLSputChar, rl_src[0]);
       break;
     case Instruction::SPUT_SHORT:
-      ConvertSput(cu, vB, art::llvm::IntrinsicHelper::HLSputShort, rl_src[0]);
+      ConvertSput(vB, art::llvm::IntrinsicHelper::HLSputShort, rl_src[0]);
       break;
     case Instruction::SPUT_WIDE:
       if (rl_src[0].fp) {
-        ConvertSput(cu, vB, art::llvm::IntrinsicHelper::HLSputDouble,
+        ConvertSput(vB, art::llvm::IntrinsicHelper::HLSputDouble,
                     rl_src[0]);
       } else {
-        ConvertSput(cu, vB, art::llvm::IntrinsicHelper::HLSputWide,
+        ConvertSput(vB, art::llvm::IntrinsicHelper::HLSputWide,
                     rl_src[0]);
       }
       break;
 
     case Instruction::SGET_OBJECT:
-      ConvertSget(cu, vB, art::llvm::IntrinsicHelper::HLSgetObject, rl_dest);
+      ConvertSget(vB, art::llvm::IntrinsicHelper::HLSgetObject, rl_dest);
       break;
     case Instruction::SGET:
       if (rl_dest.fp) {
-        ConvertSget(cu, vB, art::llvm::IntrinsicHelper::HLSgetFloat, rl_dest);
+        ConvertSget(vB, art::llvm::IntrinsicHelper::HLSgetFloat, rl_dest);
       } else {
-        ConvertSget(cu, vB, art::llvm::IntrinsicHelper::HLSget, rl_dest);
+        ConvertSget(vB, art::llvm::IntrinsicHelper::HLSget, rl_dest);
       }
       break;
     case Instruction::SGET_BOOLEAN:
-      ConvertSget(cu, vB, art::llvm::IntrinsicHelper::HLSgetBoolean, rl_dest);
+      ConvertSget(vB, art::llvm::IntrinsicHelper::HLSgetBoolean, rl_dest);
       break;
     case Instruction::SGET_BYTE:
-      ConvertSget(cu, vB, art::llvm::IntrinsicHelper::HLSgetByte, rl_dest);
+      ConvertSget(vB, art::llvm::IntrinsicHelper::HLSgetByte, rl_dest);
       break;
     case Instruction::SGET_CHAR:
-      ConvertSget(cu, vB, art::llvm::IntrinsicHelper::HLSgetChar, rl_dest);
+      ConvertSget(vB, art::llvm::IntrinsicHelper::HLSgetChar, rl_dest);
       break;
     case Instruction::SGET_SHORT:
-      ConvertSget(cu, vB, art::llvm::IntrinsicHelper::HLSgetShort, rl_dest);
+      ConvertSget(vB, art::llvm::IntrinsicHelper::HLSgetShort, rl_dest);
       break;
     case Instruction::SGET_WIDE:
       if (rl_dest.fp) {
-        ConvertSget(cu, vB, art::llvm::IntrinsicHelper::HLSgetDouble,
+        ConvertSget(vB, art::llvm::IntrinsicHelper::HLSgetDouble,
                     rl_dest);
       } else {
-        ConvertSget(cu, vB, art::llvm::IntrinsicHelper::HLSgetWide, rl_dest);
+        ConvertSget(vB, art::llvm::IntrinsicHelper::HLSgetWide, rl_dest);
       }
       break;
 
     case Instruction::RETURN_WIDE:
     case Instruction::RETURN:
     case Instruction::RETURN_OBJECT: {
-        if (!(cu->attributes & METHOD_IS_LEAF)) {
-          EmitSuspendCheck(cu);
+        if (!mir_graph_->MethodIsLeaf()) {
+          EmitSuspendCheck();
         }
-        EmitPopShadowFrame(cu);
-        cu->irb->CreateRet(GetLLVMValue(cu, rl_src[0].orig_sreg));
+        EmitPopShadowFrame();
+        irb_->CreateRet(GetLLVMValue(rl_src[0].orig_sreg));
         DCHECK(bb->terminated_by_return);
       }
       break;
 
     case Instruction::RETURN_VOID: {
-        if (((cu->access_flags & kAccConstructor) != 0) &&
-            cu->compiler_driver->RequiresConstructorBarrier(Thread::Current(),
-                                                            cu->dex_file,
-                                                            cu->class_def_idx)) {
-          EmitConstructorBarrier(cu);
+        if (((cu_->access_flags & kAccConstructor) != 0) &&
+            cu_->compiler_driver->RequiresConstructorBarrier(Thread::Current(),
+                                                            cu_->dex_file,
+                                                            cu_->class_def_idx)) {
+          EmitConstructorBarrier();
         }
-        if (!(cu->attributes & METHOD_IS_LEAF)) {
-          EmitSuspendCheck(cu);
+        if (!mir_graph_->MethodIsLeaf()) {
+          EmitSuspendCheck();
         }
-        EmitPopShadowFrame(cu);
-        cu->irb->CreateRetVoid();
+        EmitPopShadowFrame();
+        irb_->CreateRetVoid();
         DCHECK(bb->terminated_by_return);
       }
       break;
 
     case Instruction::IF_EQ:
-      ConvertCompareAndBranch(cu, bb, mir, kCondEq, rl_src[0], rl_src[1]);
+      ConvertCompareAndBranch(bb, mir, kCondEq, rl_src[0], rl_src[1]);
       break;
     case Instruction::IF_NE:
-      ConvertCompareAndBranch(cu, bb, mir, kCondNe, rl_src[0], rl_src[1]);
+      ConvertCompareAndBranch(bb, mir, kCondNe, rl_src[0], rl_src[1]);
       break;
     case Instruction::IF_LT:
-      ConvertCompareAndBranch(cu, bb, mir, kCondLt, rl_src[0], rl_src[1]);
+      ConvertCompareAndBranch(bb, mir, kCondLt, rl_src[0], rl_src[1]);
       break;
     case Instruction::IF_GE:
-      ConvertCompareAndBranch(cu, bb, mir, kCondGe, rl_src[0], rl_src[1]);
+      ConvertCompareAndBranch(bb, mir, kCondGe, rl_src[0], rl_src[1]);
       break;
     case Instruction::IF_GT:
-      ConvertCompareAndBranch(cu, bb, mir, kCondGt, rl_src[0], rl_src[1]);
+      ConvertCompareAndBranch(bb, mir, kCondGt, rl_src[0], rl_src[1]);
       break;
     case Instruction::IF_LE:
-      ConvertCompareAndBranch(cu, bb, mir, kCondLe, rl_src[0], rl_src[1]);
+      ConvertCompareAndBranch(bb, mir, kCondLe, rl_src[0], rl_src[1]);
       break;
     case Instruction::IF_EQZ:
-      ConvertCompareZeroAndBranch(cu, bb, mir, kCondEq, rl_src[0]);
+      ConvertCompareZeroAndBranch(bb, mir, kCondEq, rl_src[0]);
       break;
     case Instruction::IF_NEZ:
-      ConvertCompareZeroAndBranch(cu, bb, mir, kCondNe, rl_src[0]);
+      ConvertCompareZeroAndBranch(bb, mir, kCondNe, rl_src[0]);
       break;
     case Instruction::IF_LTZ:
-      ConvertCompareZeroAndBranch(cu, bb, mir, kCondLt, rl_src[0]);
+      ConvertCompareZeroAndBranch(bb, mir, kCondLt, rl_src[0]);
       break;
     case Instruction::IF_GEZ:
-      ConvertCompareZeroAndBranch(cu, bb, mir, kCondGe, rl_src[0]);
+      ConvertCompareZeroAndBranch(bb, mir, kCondGe, rl_src[0]);
       break;
     case Instruction::IF_GTZ:
-      ConvertCompareZeroAndBranch(cu, bb, mir, kCondGt, rl_src[0]);
+      ConvertCompareZeroAndBranch(bb, mir, kCondGt, rl_src[0]);
       break;
     case Instruction::IF_LEZ:
-      ConvertCompareZeroAndBranch(cu, bb, mir, kCondLe, rl_src[0]);
+      ConvertCompareZeroAndBranch(bb, mir, kCondLe, rl_src[0]);
       break;
 
     case Instruction::GOTO:
     case Instruction::GOTO_16:
     case Instruction::GOTO_32: {
         if (bb->taken->start_offset <= bb->start_offset) {
-          EmitSuspendCheck(cu);
+          EmitSuspendCheck();
         }
-        cu->irb->CreateBr(GetLLVMBlock(cu, bb->taken->id));
+        irb_->CreateBr(GetLLVMBlock(bb->taken->id));
       }
       break;
 
@@ -1011,123 +1006,123 @@
     case Instruction::ADD_LONG_2ADDR:
     case Instruction::ADD_INT:
     case Instruction::ADD_INT_2ADDR:
-      ConvertArithOp(cu, kOpAdd, rl_dest, rl_src[0], rl_src[1]);
+      ConvertArithOp(kOpAdd, rl_dest, rl_src[0], rl_src[1]);
       break;
     case Instruction::SUB_LONG:
     case Instruction::SUB_LONG_2ADDR:
     case Instruction::SUB_INT:
     case Instruction::SUB_INT_2ADDR:
-      ConvertArithOp(cu, kOpSub, rl_dest, rl_src[0], rl_src[1]);
+      ConvertArithOp(kOpSub, rl_dest, rl_src[0], rl_src[1]);
       break;
     case Instruction::MUL_LONG:
     case Instruction::MUL_LONG_2ADDR:
     case Instruction::MUL_INT:
     case Instruction::MUL_INT_2ADDR:
-      ConvertArithOp(cu, kOpMul, rl_dest, rl_src[0], rl_src[1]);
+      ConvertArithOp(kOpMul, rl_dest, rl_src[0], rl_src[1]);
       break;
     case Instruction::DIV_LONG:
     case Instruction::DIV_LONG_2ADDR:
     case Instruction::DIV_INT:
     case Instruction::DIV_INT_2ADDR:
-      ConvertArithOp(cu, kOpDiv, rl_dest, rl_src[0], rl_src[1]);
+      ConvertArithOp(kOpDiv, rl_dest, rl_src[0], rl_src[1]);
       break;
     case Instruction::REM_LONG:
     case Instruction::REM_LONG_2ADDR:
     case Instruction::REM_INT:
     case Instruction::REM_INT_2ADDR:
-      ConvertArithOp(cu, kOpRem, rl_dest, rl_src[0], rl_src[1]);
+      ConvertArithOp(kOpRem, rl_dest, rl_src[0], rl_src[1]);
       break;
     case Instruction::AND_LONG:
     case Instruction::AND_LONG_2ADDR:
     case Instruction::AND_INT:
     case Instruction::AND_INT_2ADDR:
-      ConvertArithOp(cu, kOpAnd, rl_dest, rl_src[0], rl_src[1]);
+      ConvertArithOp(kOpAnd, rl_dest, rl_src[0], rl_src[1]);
       break;
     case Instruction::OR_LONG:
     case Instruction::OR_LONG_2ADDR:
     case Instruction::OR_INT:
     case Instruction::OR_INT_2ADDR:
-      ConvertArithOp(cu, kOpOr, rl_dest, rl_src[0], rl_src[1]);
+      ConvertArithOp(kOpOr, rl_dest, rl_src[0], rl_src[1]);
       break;
     case Instruction::XOR_LONG:
     case Instruction::XOR_LONG_2ADDR:
     case Instruction::XOR_INT:
     case Instruction::XOR_INT_2ADDR:
-      ConvertArithOp(cu, kOpXor, rl_dest, rl_src[0], rl_src[1]);
+      ConvertArithOp(kOpXor, rl_dest, rl_src[0], rl_src[1]);
       break;
     case Instruction::SHL_LONG:
     case Instruction::SHL_LONG_2ADDR:
-      ConvertShift(cu, art::llvm::IntrinsicHelper::SHLLong,
+      ConvertShift(art::llvm::IntrinsicHelper::SHLLong,
                     rl_dest, rl_src[0], rl_src[1]);
       break;
     case Instruction::SHL_INT:
     case Instruction::SHL_INT_2ADDR:
-      ConvertShift(cu, art::llvm::IntrinsicHelper::SHLInt,
+      ConvertShift(art::llvm::IntrinsicHelper::SHLInt,
                    rl_dest, rl_src[0], rl_src[1]);
       break;
     case Instruction::SHR_LONG:
     case Instruction::SHR_LONG_2ADDR:
-      ConvertShift(cu, art::llvm::IntrinsicHelper::SHRLong,
+      ConvertShift(art::llvm::IntrinsicHelper::SHRLong,
                    rl_dest, rl_src[0], rl_src[1]);
       break;
     case Instruction::SHR_INT:
     case Instruction::SHR_INT_2ADDR:
-      ConvertShift(cu, art::llvm::IntrinsicHelper::SHRInt,
+      ConvertShift(art::llvm::IntrinsicHelper::SHRInt,
                    rl_dest, rl_src[0], rl_src[1]);
       break;
     case Instruction::USHR_LONG:
     case Instruction::USHR_LONG_2ADDR:
-      ConvertShift(cu, art::llvm::IntrinsicHelper::USHRLong,
+      ConvertShift(art::llvm::IntrinsicHelper::USHRLong,
                    rl_dest, rl_src[0], rl_src[1]);
       break;
     case Instruction::USHR_INT:
     case Instruction::USHR_INT_2ADDR:
-      ConvertShift(cu, art::llvm::IntrinsicHelper::USHRInt,
+      ConvertShift(art::llvm::IntrinsicHelper::USHRInt,
                    rl_dest, rl_src[0], rl_src[1]);
       break;
 
     case Instruction::ADD_INT_LIT16:
     case Instruction::ADD_INT_LIT8:
-      ConvertArithOpLit(cu, kOpAdd, rl_dest, rl_src[0], vC);
+      ConvertArithOpLit(kOpAdd, rl_dest, rl_src[0], vC);
       break;
     case Instruction::RSUB_INT:
     case Instruction::RSUB_INT_LIT8:
-      ConvertArithOpLit(cu, kOpRsub, rl_dest, rl_src[0], vC);
+      ConvertArithOpLit(kOpRsub, rl_dest, rl_src[0], vC);
       break;
     case Instruction::MUL_INT_LIT16:
     case Instruction::MUL_INT_LIT8:
-      ConvertArithOpLit(cu, kOpMul, rl_dest, rl_src[0], vC);
+      ConvertArithOpLit(kOpMul, rl_dest, rl_src[0], vC);
       break;
     case Instruction::DIV_INT_LIT16:
     case Instruction::DIV_INT_LIT8:
-      ConvertArithOpLit(cu, kOpDiv, rl_dest, rl_src[0], vC);
+      ConvertArithOpLit(kOpDiv, rl_dest, rl_src[0], vC);
       break;
     case Instruction::REM_INT_LIT16:
     case Instruction::REM_INT_LIT8:
-      ConvertArithOpLit(cu, kOpRem, rl_dest, rl_src[0], vC);
+      ConvertArithOpLit(kOpRem, rl_dest, rl_src[0], vC);
       break;
     case Instruction::AND_INT_LIT16:
     case Instruction::AND_INT_LIT8:
-      ConvertArithOpLit(cu, kOpAnd, rl_dest, rl_src[0], vC);
+      ConvertArithOpLit(kOpAnd, rl_dest, rl_src[0], vC);
       break;
     case Instruction::OR_INT_LIT16:
     case Instruction::OR_INT_LIT8:
-      ConvertArithOpLit(cu, kOpOr, rl_dest, rl_src[0], vC);
+      ConvertArithOpLit(kOpOr, rl_dest, rl_src[0], vC);
       break;
     case Instruction::XOR_INT_LIT16:
     case Instruction::XOR_INT_LIT8:
-      ConvertArithOpLit(cu, kOpXor, rl_dest, rl_src[0], vC);
+      ConvertArithOpLit(kOpXor, rl_dest, rl_src[0], vC);
       break;
     case Instruction::SHL_INT_LIT8:
-      ConvertShiftLit(cu, art::llvm::IntrinsicHelper::SHLInt,
+      ConvertShiftLit(art::llvm::IntrinsicHelper::SHLInt,
                       rl_dest, rl_src[0], vC & 0x1f);
       break;
     case Instruction::SHR_INT_LIT8:
-      ConvertShiftLit(cu, art::llvm::IntrinsicHelper::SHRInt,
+      ConvertShiftLit(art::llvm::IntrinsicHelper::SHRInt,
                       rl_dest, rl_src[0], vC & 0x1f);
       break;
     case Instruction::USHR_INT_LIT8:
-      ConvertShiftLit(cu, art::llvm::IntrinsicHelper::USHRInt,
+      ConvertShiftLit(art::llvm::IntrinsicHelper::USHRInt,
                       rl_dest, rl_src[0], vC & 0x1f);
       break;
 
@@ -1135,122 +1130,122 @@
     case Instruction::ADD_FLOAT_2ADDR:
     case Instruction::ADD_DOUBLE:
     case Instruction::ADD_DOUBLE_2ADDR:
-      ConvertFPArithOp(cu, kOpAdd, rl_dest, rl_src[0], rl_src[1]);
+      ConvertFPArithOp(kOpAdd, rl_dest, rl_src[0], rl_src[1]);
       break;
 
     case Instruction::SUB_FLOAT:
     case Instruction::SUB_FLOAT_2ADDR:
     case Instruction::SUB_DOUBLE:
     case Instruction::SUB_DOUBLE_2ADDR:
-      ConvertFPArithOp(cu, kOpSub, rl_dest, rl_src[0], rl_src[1]);
+      ConvertFPArithOp(kOpSub, rl_dest, rl_src[0], rl_src[1]);
       break;
 
     case Instruction::MUL_FLOAT:
     case Instruction::MUL_FLOAT_2ADDR:
     case Instruction::MUL_DOUBLE:
     case Instruction::MUL_DOUBLE_2ADDR:
-      ConvertFPArithOp(cu, kOpMul, rl_dest, rl_src[0], rl_src[1]);
+      ConvertFPArithOp(kOpMul, rl_dest, rl_src[0], rl_src[1]);
       break;
 
     case Instruction::DIV_FLOAT:
     case Instruction::DIV_FLOAT_2ADDR:
     case Instruction::DIV_DOUBLE:
     case Instruction::DIV_DOUBLE_2ADDR:
-      ConvertFPArithOp(cu, kOpDiv, rl_dest, rl_src[0], rl_src[1]);
+      ConvertFPArithOp(kOpDiv, rl_dest, rl_src[0], rl_src[1]);
       break;
 
     case Instruction::REM_FLOAT:
     case Instruction::REM_FLOAT_2ADDR:
     case Instruction::REM_DOUBLE:
     case Instruction::REM_DOUBLE_2ADDR:
-      ConvertFPArithOp(cu, kOpRem, rl_dest, rl_src[0], rl_src[1]);
+      ConvertFPArithOp(kOpRem, rl_dest, rl_src[0], rl_src[1]);
       break;
 
     case Instruction::INVOKE_STATIC:
-      ConvertInvoke(cu, bb, mir, kStatic, false /*range*/,
+      ConvertInvoke(bb, mir, kStatic, false /*range*/,
                     false /* NewFilledArray */);
       break;
     case Instruction::INVOKE_STATIC_RANGE:
-      ConvertInvoke(cu, bb, mir, kStatic, true /*range*/,
+      ConvertInvoke(bb, mir, kStatic, true /*range*/,
                     false /* NewFilledArray */);
       break;
 
     case Instruction::INVOKE_DIRECT:
-      ConvertInvoke(cu, bb,  mir, kDirect, false /*range*/,
+      ConvertInvoke(bb, mir, kDirect, false /*range*/,
                     false /* NewFilledArray */);
       break;
     case Instruction::INVOKE_DIRECT_RANGE:
-      ConvertInvoke(cu, bb, mir, kDirect, true /*range*/,
+      ConvertInvoke(bb, mir, kDirect, true /*range*/,
                     false /* NewFilledArray */);
       break;
 
     case Instruction::INVOKE_VIRTUAL:
-      ConvertInvoke(cu, bb, mir, kVirtual, false /*range*/,
+      ConvertInvoke(bb, mir, kVirtual, false /*range*/,
                     false /* NewFilledArray */);
       break;
     case Instruction::INVOKE_VIRTUAL_RANGE:
-      ConvertInvoke(cu, bb, mir, kVirtual, true /*range*/,
+      ConvertInvoke(bb, mir, kVirtual, true /*range*/,
                     false /* NewFilledArray */);
       break;
 
     case Instruction::INVOKE_SUPER:
-      ConvertInvoke(cu, bb, mir, kSuper, false /*range*/,
+      ConvertInvoke(bb, mir, kSuper, false /*range*/,
                     false /* NewFilledArray */);
       break;
     case Instruction::INVOKE_SUPER_RANGE:
-      ConvertInvoke(cu, bb, mir, kSuper, true /*range*/,
+      ConvertInvoke(bb, mir, kSuper, true /*range*/,
                     false /* NewFilledArray */);
       break;
 
     case Instruction::INVOKE_INTERFACE:
-      ConvertInvoke(cu, bb, mir, kInterface, false /*range*/,
+      ConvertInvoke(bb, mir, kInterface, false /*range*/,
                     false /* NewFilledArray */);
       break;
     case Instruction::INVOKE_INTERFACE_RANGE:
-      ConvertInvoke(cu, bb, mir, kInterface, true /*range*/,
+      ConvertInvoke(bb, mir, kInterface, true /*range*/,
                     false /* NewFilledArray */);
       break;
     case Instruction::FILLED_NEW_ARRAY:
-      ConvertInvoke(cu, bb, mir, kInterface, false /*range*/,
+      ConvertInvoke(bb, mir, kInterface, false /*range*/,
                     true /* NewFilledArray */);
       break;
     case Instruction::FILLED_NEW_ARRAY_RANGE:
-      ConvertInvoke(cu, bb, mir, kInterface, true /*range*/,
+      ConvertInvoke(bb, mir, kInterface, true /*range*/,
                     true /* NewFilledArray */);
       break;
 
     case Instruction::CONST_STRING:
     case Instruction::CONST_STRING_JUMBO:
-      ConvertConstObject(cu, vB, art::llvm::IntrinsicHelper::ConstString,
+      ConvertConstObject(vB, art::llvm::IntrinsicHelper::ConstString,
                          rl_dest);
       break;
 
     case Instruction::CONST_CLASS:
-      ConvertConstObject(cu, vB, art::llvm::IntrinsicHelper::ConstClass,
+      ConvertConstObject(vB, art::llvm::IntrinsicHelper::ConstClass,
                          rl_dest);
       break;
 
     case Instruction::CHECK_CAST:
-      ConvertCheckCast(cu, vB, rl_src[0]);
+      ConvertCheckCast(vB, rl_src[0]);
       break;
 
     case Instruction::NEW_INSTANCE:
-      ConvertNewInstance(cu, vB, rl_dest);
+      ConvertNewInstance(vB, rl_dest);
       break;
 
    case Instruction::MOVE_EXCEPTION:
-      ConvertMoveException(cu, rl_dest);
+      ConvertMoveException(rl_dest);
       break;
 
    case Instruction::THROW:
-      ConvertThrow(cu, rl_src[0]);
+      ConvertThrow(rl_src[0]);
       /*
        * If this throw is standalone, terminate.
        * If it might rethrow, force termination
        * of the following block.
        */
       if (bb->fall_through == NULL) {
-        cu->irb->CreateUnreachable();
+        irb_->CreateUnreachable();
       } else {
         bb->fall_through->fall_through = NULL;
         bb->fall_through->taken = NULL;
@@ -1267,291 +1262,291 @@
       break;
 
     case Instruction::MONITOR_ENTER:
-      ConvertMonitorEnterExit(cu, opt_flags,
+      ConvertMonitorEnterExit(opt_flags,
                               art::llvm::IntrinsicHelper::MonitorEnter,
                               rl_src[0]);
       break;
 
     case Instruction::MONITOR_EXIT:
-      ConvertMonitorEnterExit(cu, opt_flags,
+      ConvertMonitorEnterExit(opt_flags,
                               art::llvm::IntrinsicHelper::MonitorExit,
                               rl_src[0]);
       break;
 
     case Instruction::ARRAY_LENGTH:
-      ConvertArrayLength(cu, opt_flags, rl_dest, rl_src[0]);
+      ConvertArrayLength(opt_flags, rl_dest, rl_src[0]);
       break;
 
     case Instruction::NEW_ARRAY:
-      ConvertNewArray(cu, vC, rl_dest, rl_src[0]);
+      ConvertNewArray(vC, rl_dest, rl_src[0]);
       break;
 
     case Instruction::INSTANCE_OF:
-      ConvertInstanceOf(cu, vC, rl_dest, rl_src[0]);
+      ConvertInstanceOf(vC, rl_dest, rl_src[0]);
       break;
 
     case Instruction::AGET:
       if (rl_dest.fp) {
-        ConvertAget(cu, opt_flags,
+        ConvertAget(opt_flags,
                     art::llvm::IntrinsicHelper::HLArrayGetFloat,
                     rl_dest, rl_src[0], rl_src[1]);
       } else {
-        ConvertAget(cu, opt_flags, art::llvm::IntrinsicHelper::HLArrayGet,
+        ConvertAget(opt_flags, art::llvm::IntrinsicHelper::HLArrayGet,
                     rl_dest, rl_src[0], rl_src[1]);
       }
       break;
     case Instruction::AGET_OBJECT:
-      ConvertAget(cu, opt_flags, art::llvm::IntrinsicHelper::HLArrayGetObject,
+      ConvertAget(opt_flags, art::llvm::IntrinsicHelper::HLArrayGetObject,
                   rl_dest, rl_src[0], rl_src[1]);
       break;
     case Instruction::AGET_BOOLEAN:
-      ConvertAget(cu, opt_flags,
+      ConvertAget(opt_flags,
                   art::llvm::IntrinsicHelper::HLArrayGetBoolean,
                   rl_dest, rl_src[0], rl_src[1]);
       break;
     case Instruction::AGET_BYTE:
-      ConvertAget(cu, opt_flags, art::llvm::IntrinsicHelper::HLArrayGetByte,
+      ConvertAget(opt_flags, art::llvm::IntrinsicHelper::HLArrayGetByte,
                   rl_dest, rl_src[0], rl_src[1]);
       break;
     case Instruction::AGET_CHAR:
-      ConvertAget(cu, opt_flags, art::llvm::IntrinsicHelper::HLArrayGetChar,
+      ConvertAget(opt_flags, art::llvm::IntrinsicHelper::HLArrayGetChar,
                   rl_dest, rl_src[0], rl_src[1]);
       break;
     case Instruction::AGET_SHORT:
-      ConvertAget(cu, opt_flags, art::llvm::IntrinsicHelper::HLArrayGetShort,
+      ConvertAget(opt_flags, art::llvm::IntrinsicHelper::HLArrayGetShort,
                   rl_dest, rl_src[0], rl_src[1]);
       break;
     case Instruction::AGET_WIDE:
       if (rl_dest.fp) {
-        ConvertAget(cu, opt_flags,
+        ConvertAget(opt_flags,
                     art::llvm::IntrinsicHelper::HLArrayGetDouble,
                     rl_dest, rl_src[0], rl_src[1]);
       } else {
-        ConvertAget(cu, opt_flags, art::llvm::IntrinsicHelper::HLArrayGetWide,
+        ConvertAget(opt_flags, art::llvm::IntrinsicHelper::HLArrayGetWide,
                     rl_dest, rl_src[0], rl_src[1]);
       }
       break;
 
     case Instruction::APUT:
       if (rl_src[0].fp) {
-        ConvertAput(cu, opt_flags,
+        ConvertAput(opt_flags,
                     art::llvm::IntrinsicHelper::HLArrayPutFloat,
                     rl_src[0], rl_src[1], rl_src[2]);
       } else {
-        ConvertAput(cu, opt_flags, art::llvm::IntrinsicHelper::HLArrayPut,
+        ConvertAput(opt_flags, art::llvm::IntrinsicHelper::HLArrayPut,
                     rl_src[0], rl_src[1], rl_src[2]);
       }
       break;
     case Instruction::APUT_OBJECT:
-      ConvertAput(cu, opt_flags, art::llvm::IntrinsicHelper::HLArrayPutObject,
+      ConvertAput(opt_flags, art::llvm::IntrinsicHelper::HLArrayPutObject,
                     rl_src[0], rl_src[1], rl_src[2]);
       break;
     case Instruction::APUT_BOOLEAN:
-      ConvertAput(cu, opt_flags,
+      ConvertAput(opt_flags,
                   art::llvm::IntrinsicHelper::HLArrayPutBoolean,
                     rl_src[0], rl_src[1], rl_src[2]);
       break;
     case Instruction::APUT_BYTE:
-      ConvertAput(cu, opt_flags, art::llvm::IntrinsicHelper::HLArrayPutByte,
+      ConvertAput(opt_flags, art::llvm::IntrinsicHelper::HLArrayPutByte,
                     rl_src[0], rl_src[1], rl_src[2]);
       break;
     case Instruction::APUT_CHAR:
-      ConvertAput(cu, opt_flags, art::llvm::IntrinsicHelper::HLArrayPutChar,
+      ConvertAput(opt_flags, art::llvm::IntrinsicHelper::HLArrayPutChar,
                     rl_src[0], rl_src[1], rl_src[2]);
       break;
     case Instruction::APUT_SHORT:
-      ConvertAput(cu, opt_flags, art::llvm::IntrinsicHelper::HLArrayPutShort,
+      ConvertAput(opt_flags, art::llvm::IntrinsicHelper::HLArrayPutShort,
                     rl_src[0], rl_src[1], rl_src[2]);
       break;
     case Instruction::APUT_WIDE:
       if (rl_src[0].fp) {
-        ConvertAput(cu, opt_flags,
+        ConvertAput(opt_flags,
                     art::llvm::IntrinsicHelper::HLArrayPutDouble,
                     rl_src[0], rl_src[1], rl_src[2]);
       } else {
-        ConvertAput(cu, opt_flags, art::llvm::IntrinsicHelper::HLArrayPutWide,
+        ConvertAput(opt_flags, art::llvm::IntrinsicHelper::HLArrayPutWide,
                     rl_src[0], rl_src[1], rl_src[2]);
       }
       break;
 
     case Instruction::IGET:
       if (rl_dest.fp) {
-        ConvertIget(cu, opt_flags, art::llvm::IntrinsicHelper::HLIGetFloat,
+        ConvertIget(opt_flags, art::llvm::IntrinsicHelper::HLIGetFloat,
                     rl_dest, rl_src[0], vC);
       } else {
-        ConvertIget(cu, opt_flags, art::llvm::IntrinsicHelper::HLIGet,
+        ConvertIget(opt_flags, art::llvm::IntrinsicHelper::HLIGet,
                     rl_dest, rl_src[0], vC);
       }
       break;
     case Instruction::IGET_OBJECT:
-      ConvertIget(cu, opt_flags, art::llvm::IntrinsicHelper::HLIGetObject,
+      ConvertIget(opt_flags, art::llvm::IntrinsicHelper::HLIGetObject,
                   rl_dest, rl_src[0], vC);
       break;
     case Instruction::IGET_BOOLEAN:
-      ConvertIget(cu, opt_flags, art::llvm::IntrinsicHelper::HLIGetBoolean,
+      ConvertIget(opt_flags, art::llvm::IntrinsicHelper::HLIGetBoolean,
                   rl_dest, rl_src[0], vC);
       break;
     case Instruction::IGET_BYTE:
-      ConvertIget(cu, opt_flags, art::llvm::IntrinsicHelper::HLIGetByte,
+      ConvertIget(opt_flags, art::llvm::IntrinsicHelper::HLIGetByte,
                   rl_dest, rl_src[0], vC);
       break;
     case Instruction::IGET_CHAR:
-      ConvertIget(cu, opt_flags, art::llvm::IntrinsicHelper::HLIGetChar,
+      ConvertIget(opt_flags, art::llvm::IntrinsicHelper::HLIGetChar,
                   rl_dest, rl_src[0], vC);
       break;
     case Instruction::IGET_SHORT:
-      ConvertIget(cu, opt_flags, art::llvm::IntrinsicHelper::HLIGetShort,
+      ConvertIget(opt_flags, art::llvm::IntrinsicHelper::HLIGetShort,
                   rl_dest, rl_src[0], vC);
       break;
     case Instruction::IGET_WIDE:
       if (rl_dest.fp) {
-        ConvertIget(cu, opt_flags, art::llvm::IntrinsicHelper::HLIGetDouble,
+        ConvertIget(opt_flags, art::llvm::IntrinsicHelper::HLIGetDouble,
                     rl_dest, rl_src[0], vC);
       } else {
-        ConvertIget(cu, opt_flags, art::llvm::IntrinsicHelper::HLIGetWide,
+        ConvertIget(opt_flags, art::llvm::IntrinsicHelper::HLIGetWide,
                     rl_dest, rl_src[0], vC);
       }
       break;
     case Instruction::IPUT:
       if (rl_src[0].fp) {
-        ConvertIput(cu, opt_flags, art::llvm::IntrinsicHelper::HLIPutFloat,
+        ConvertIput(opt_flags, art::llvm::IntrinsicHelper::HLIPutFloat,
                     rl_src[0], rl_src[1], vC);
       } else {
-        ConvertIput(cu, opt_flags, art::llvm::IntrinsicHelper::HLIPut,
+        ConvertIput(opt_flags, art::llvm::IntrinsicHelper::HLIPut,
                     rl_src[0], rl_src[1], vC);
       }
       break;
     case Instruction::IPUT_OBJECT:
-      ConvertIput(cu, opt_flags, art::llvm::IntrinsicHelper::HLIPutObject,
+      ConvertIput(opt_flags, art::llvm::IntrinsicHelper::HLIPutObject,
                   rl_src[0], rl_src[1], vC);
       break;
     case Instruction::IPUT_BOOLEAN:
-      ConvertIput(cu, opt_flags, art::llvm::IntrinsicHelper::HLIPutBoolean,
+      ConvertIput(opt_flags, art::llvm::IntrinsicHelper::HLIPutBoolean,
                   rl_src[0], rl_src[1], vC);
       break;
     case Instruction::IPUT_BYTE:
-      ConvertIput(cu, opt_flags, art::llvm::IntrinsicHelper::HLIPutByte,
+      ConvertIput(opt_flags, art::llvm::IntrinsicHelper::HLIPutByte,
                   rl_src[0], rl_src[1], vC);
       break;
     case Instruction::IPUT_CHAR:
-      ConvertIput(cu, opt_flags, art::llvm::IntrinsicHelper::HLIPutChar,
+      ConvertIput(opt_flags, art::llvm::IntrinsicHelper::HLIPutChar,
                   rl_src[0], rl_src[1], vC);
       break;
     case Instruction::IPUT_SHORT:
-      ConvertIput(cu, opt_flags, art::llvm::IntrinsicHelper::HLIPutShort,
+      ConvertIput(opt_flags, art::llvm::IntrinsicHelper::HLIPutShort,
                   rl_src[0], rl_src[1], vC);
       break;
     case Instruction::IPUT_WIDE:
       if (rl_src[0].fp) {
-        ConvertIput(cu, opt_flags, art::llvm::IntrinsicHelper::HLIPutDouble,
+        ConvertIput(opt_flags, art::llvm::IntrinsicHelper::HLIPutDouble,
                     rl_src[0], rl_src[1], vC);
       } else {
-        ConvertIput(cu, opt_flags, art::llvm::IntrinsicHelper::HLIPutWide,
+        ConvertIput(opt_flags, art::llvm::IntrinsicHelper::HLIPutWide,
                     rl_src[0], rl_src[1], vC);
       }
       break;
 
     case Instruction::FILL_ARRAY_DATA:
-      ConvertFillArrayData(cu, vB, rl_src[0]);
+      ConvertFillArrayData(vB, rl_src[0]);
       break;
 
     case Instruction::LONG_TO_INT:
-      ConvertLongToInt(cu, rl_dest, rl_src[0]);
+      ConvertLongToInt(rl_dest, rl_src[0]);
       break;
 
     case Instruction::INT_TO_LONG:
-      ConvertIntToLong(cu, rl_dest, rl_src[0]);
+      ConvertIntToLong(rl_dest, rl_src[0]);
       break;
 
     case Instruction::INT_TO_CHAR:
-      ConvertIntNarrowing(cu, rl_dest, rl_src[0],
+      ConvertIntNarrowing(rl_dest, rl_src[0],
                           art::llvm::IntrinsicHelper::IntToChar);
       break;
     case Instruction::INT_TO_BYTE:
-      ConvertIntNarrowing(cu, rl_dest, rl_src[0],
+      ConvertIntNarrowing(rl_dest, rl_src[0],
                           art::llvm::IntrinsicHelper::IntToByte);
       break;
     case Instruction::INT_TO_SHORT:
-      ConvertIntNarrowing(cu, rl_dest, rl_src[0],
+      ConvertIntNarrowing(rl_dest, rl_src[0],
                           art::llvm::IntrinsicHelper::IntToShort);
       break;
 
     case Instruction::INT_TO_FLOAT:
     case Instruction::LONG_TO_FLOAT:
-      ConvertIntToFP(cu, cu->irb->getFloatTy(), rl_dest, rl_src[0]);
+      ConvertIntToFP(irb_->getFloatTy(), rl_dest, rl_src[0]);
       break;
 
     case Instruction::INT_TO_DOUBLE:
     case Instruction::LONG_TO_DOUBLE:
-      ConvertIntToFP(cu, cu->irb->getDoubleTy(), rl_dest, rl_src[0]);
+      ConvertIntToFP(irb_->getDoubleTy(), rl_dest, rl_src[0]);
       break;
 
     case Instruction::FLOAT_TO_DOUBLE:
-      ConvertFloatToDouble(cu, rl_dest, rl_src[0]);
+      ConvertFloatToDouble(rl_dest, rl_src[0]);
       break;
 
     case Instruction::DOUBLE_TO_FLOAT:
-      ConvertDoubleToFloat(cu, rl_dest, rl_src[0]);
+      ConvertDoubleToFloat(rl_dest, rl_src[0]);
       break;
 
     case Instruction::NEG_LONG:
     case Instruction::NEG_INT:
-      ConvertNeg(cu, rl_dest, rl_src[0]);
+      ConvertNeg(rl_dest, rl_src[0]);
       break;
 
     case Instruction::NEG_FLOAT:
     case Instruction::NEG_DOUBLE:
-      ConvertNegFP(cu, rl_dest, rl_src[0]);
+      ConvertNegFP(rl_dest, rl_src[0]);
       break;
 
     case Instruction::NOT_LONG:
     case Instruction::NOT_INT:
-      ConvertNot(cu, rl_dest, rl_src[0]);
+      ConvertNot(rl_dest, rl_src[0]);
       break;
 
     case Instruction::FLOAT_TO_INT:
-      ConvertFPToInt(cu, art::llvm::IntrinsicHelper::F2I, rl_dest, rl_src[0]);
+      ConvertFPToInt(art::llvm::IntrinsicHelper::F2I, rl_dest, rl_src[0]);
       break;
 
     case Instruction::DOUBLE_TO_INT:
-      ConvertFPToInt(cu, art::llvm::IntrinsicHelper::D2I, rl_dest, rl_src[0]);
+      ConvertFPToInt(art::llvm::IntrinsicHelper::D2I, rl_dest, rl_src[0]);
       break;
 
     case Instruction::FLOAT_TO_LONG:
-      ConvertFPToInt(cu, art::llvm::IntrinsicHelper::F2L, rl_dest, rl_src[0]);
+      ConvertFPToInt(art::llvm::IntrinsicHelper::F2L, rl_dest, rl_src[0]);
       break;
 
     case Instruction::DOUBLE_TO_LONG:
-      ConvertFPToInt(cu, art::llvm::IntrinsicHelper::D2L, rl_dest, rl_src[0]);
+      ConvertFPToInt(art::llvm::IntrinsicHelper::D2L, rl_dest, rl_src[0]);
       break;
 
     case Instruction::CMPL_FLOAT:
-      ConvertWideComparison(cu, art::llvm::IntrinsicHelper::CmplFloat,
+      ConvertWideComparison(art::llvm::IntrinsicHelper::CmplFloat,
                             rl_dest, rl_src[0], rl_src[1]);
       break;
     case Instruction::CMPG_FLOAT:
-      ConvertWideComparison(cu, art::llvm::IntrinsicHelper::CmpgFloat,
+      ConvertWideComparison(art::llvm::IntrinsicHelper::CmpgFloat,
                             rl_dest, rl_src[0], rl_src[1]);
       break;
     case Instruction::CMPL_DOUBLE:
-      ConvertWideComparison(cu, art::llvm::IntrinsicHelper::CmplDouble,
+      ConvertWideComparison(art::llvm::IntrinsicHelper::CmplDouble,
                             rl_dest, rl_src[0], rl_src[1]);
       break;
     case Instruction::CMPG_DOUBLE:
-      ConvertWideComparison(cu, art::llvm::IntrinsicHelper::CmpgDouble,
+      ConvertWideComparison(art::llvm::IntrinsicHelper::CmpgDouble,
                             rl_dest, rl_src[0], rl_src[1]);
       break;
     case Instruction::CMP_LONG:
-      ConvertWideComparison(cu, art::llvm::IntrinsicHelper::CmpLong,
+      ConvertWideComparison(art::llvm::IntrinsicHelper::CmpLong,
                             rl_dest, rl_src[0], rl_src[1]);
       break;
 
     case Instruction::PACKED_SWITCH:
-      ConvertPackedSwitch(cu, bb, vB, rl_src[0]);
+      ConvertPackedSwitch(bb, vB, rl_src[0]);
       break;
 
     case Instruction::SPARSE_SWITCH:
-      ConvertSparseSwitch(cu, bb, vB, rl_src[0]);
+      ConvertSparseSwitch(bb, vB, rl_src[0]);
       break;
 
     default:
@@ -1561,51 +1556,38 @@
   return res;
 }
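
The operand-prep block at the top of ConvertMIRNode is driven entirely by the
per-opcode dataflow attributes: DF_UA/DF_UB/DF_UC mark uses, DF_DA a def, and
the *_WIDE bits make an operand consume a consecutive pair of SSA registers.
A standalone sketch of the same decode scheme; the flag values below are
illustrative, not the real DF_* encoding:

    #include <cstdio>

    enum : unsigned {
      DF_UA = 1u << 0, DF_UB = 1u << 1, DF_UC = 1u << 2,  // uses of vA/vB/vC
      DF_DA = 1u << 3,                                    // def of vA
      DF_A_WIDE = 1u << 4, DF_B_WIDE = 1u << 5, DF_C_WIDE = 1u << 6,
    };

    static void Decode(const char* name, unsigned attrs) {
      struct { unsigned use_bit, wide_bit; char tag; } ops[] = {
        { DF_UA, DF_A_WIDE, 'A' },
        { DF_UB, DF_B_WIDE, 'B' },
        { DF_UC, DF_C_WIDE, 'C' },
      };
      int next_sreg = 0;
      std::printf("%s:", name);
      for (const auto& op : ops) {
        if ((attrs & op.use_bit) == 0) continue;
        if (attrs & op.wide_bit) {  // wide operands occupy an sreg pair
          std::printf(" use %c = s%d..s%d", op.tag, next_sreg, next_sreg + 1);
          next_sreg += 2;
        } else {
          std::printf(" use %c = s%d", op.tag, next_sreg);
          next_sreg += 1;
        }
      }
      if (attrs & DF_DA) {
        std::printf(" | def A%s", (attrs & DF_A_WIDE) ? " (wide)" : "");
      }
      std::printf("\n");
    }

    int main() {
      Decode("ADD_INT",  DF_UB | DF_UC | DF_DA);
      Decode("ADD_LONG", DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE |
                         DF_DA | DF_A_WIDE);
      return 0;
    }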
 
-static void SetDexOffset(CompilationUnit* cu, int32_t offset)
+void MirConverter::SetDexOffset(int32_t offset)
 {
-  cu->current_dalvik_offset = offset;
+  current_dalvik_offset_ = offset;
   ::llvm::SmallVector< ::llvm::Value*, 1> array_ref;
-  array_ref.push_back(cu->irb->getInt32(offset));
-  ::llvm::MDNode* node = ::llvm::MDNode::get(*cu->context, array_ref);
-  cu->irb->SetDexOffset(node);
+  array_ref.push_back(irb_->getInt32(offset));
+  ::llvm::MDNode* node = ::llvm::MDNode::get(*context_, array_ref);
+  irb_->SetDexOffset(node);
 }
 
 // Attach method info as metadata to special intrinsic
-static void SetMethodInfo(CompilationUnit* cu)
+void MirConverter::SetMethodInfo()
 {
   // We don't want dex offset on this
-  cu->irb->SetDexOffset(NULL);
+  irb_->SetDexOffset(NULL);
   art::llvm::IntrinsicHelper::IntrinsicId id;
   id = art::llvm::IntrinsicHelper::MethodInfo;
-  ::llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
-  ::llvm::Instruction* inst = cu->irb->CreateCall(intr);
+  ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
+  ::llvm::Instruction* inst = irb_->CreateCall(intr);
   ::llvm::SmallVector< ::llvm::Value*, 2> reg_info;
-  reg_info.push_back(cu->irb->getInt32(cu->num_ins));
-  reg_info.push_back(cu->irb->getInt32(cu->num_regs));
-  reg_info.push_back(cu->irb->getInt32(cu->num_outs));
-  reg_info.push_back(cu->irb->getInt32(cu->num_compiler_temps));
-  reg_info.push_back(cu->irb->getInt32(cu->mir_graph->GetNumSSARegs()));
-  ::llvm::MDNode* reg_info_node = ::llvm::MDNode::get(*cu->context, reg_info);
+  reg_info.push_back(irb_->getInt32(cu_->num_ins));
+  reg_info.push_back(irb_->getInt32(cu_->num_regs));
+  reg_info.push_back(irb_->getInt32(cu_->num_outs));
+  reg_info.push_back(irb_->getInt32(cu_->num_compiler_temps));
+  reg_info.push_back(irb_->getInt32(mir_graph_->GetNumSSARegs()));
+  ::llvm::MDNode* reg_info_node = ::llvm::MDNode::get(*context_, reg_info);
   inst->setMetadata("RegInfo", reg_info_node);
-  int promo_size = cu->num_dalvik_registers + cu->num_compiler_temps + 1;
-  ::llvm::SmallVector< ::llvm::Value*, 50> pmap;
-  for (int i = 0; i < promo_size; i++) {
-    PromotionMap* p = &cu->promotion_map[i];
-    int32_t map_data = ((p->first_in_pair & 0xff) << 24) |
-                      ((p->FpReg & 0xff) << 16) |
-                      ((p->core_reg & 0xff) << 8) |
-                      ((p->fp_location & 0xf) << 4) |
-                      (p->core_location & 0xf);
-    pmap.push_back(cu->irb->getInt32(map_data));
-  }
-  ::llvm::MDNode* map_node = ::llvm::MDNode::get(*cu->context, pmap);
-  inst->setMetadata("PromotionMap", map_node);
-  SetDexOffset(cu, cu->current_dalvik_offset);
+  SetDexOffset(current_dalvik_offset_);
 }
 
-static void HandlePhiNodes(CompilationUnit* cu, BasicBlock* bb, ::llvm::BasicBlock* llvm_bb)
+void MirConverter::HandlePhiNodes(BasicBlock* bb, ::llvm::BasicBlock* llvm_bb)
 {
-  SetDexOffset(cu, bb->start_offset);
+  SetDexOffset(bb->start_offset);
   for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
     int opcode = mir->dalvikInsn.opcode;
     if (opcode < kMirOpFirst) {
@@ -1616,7 +1598,7 @@
       // Skip other mir Pseudos.
       continue;
     }
-    RegLocation rl_dest = cu->reg_location[mir->ssa_rep->defs[0]];
+    RegLocation rl_dest = mir_graph_->reg_location_[mir->ssa_rep->defs[0]];
     /*
      * The Art compiler's Phi nodes only handle 32-bit operands,
      * representing wide values using a matched set of Phi nodes
@@ -1629,43 +1611,43 @@
     }
     int* incoming = reinterpret_cast<int*>(mir->dalvikInsn.vB);
     ::llvm::Type* phi_type =
-        LlvmTypeFromLocRec(cu, rl_dest);
-    ::llvm::PHINode* phi = cu->irb->CreatePHI(phi_type, mir->ssa_rep->num_uses);
+        LlvmTypeFromLocRec(rl_dest);
+    ::llvm::PHINode* phi = irb_->CreatePHI(phi_type, mir->ssa_rep->num_uses);
     for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
       RegLocation loc;
       // Don't check width here.
-      loc = GetRawSrc(cu, mir, i);
+      loc = mir_graph_->GetRawSrc(mir, i);
       DCHECK_EQ(rl_dest.wide, loc.wide);
       DCHECK_EQ(rl_dest.wide & rl_dest.high_word, loc.wide & loc.high_word);
       DCHECK_EQ(rl_dest.fp, loc.fp);
       DCHECK_EQ(rl_dest.core, loc.core);
       DCHECK_EQ(rl_dest.ref, loc.ref);
       SafeMap<unsigned int, unsigned int>::iterator it;
-      it = cu->block_id_map.find(incoming[i]);
-      DCHECK(it != cu->block_id_map.end());
-      DCHECK(GetLLVMValue(cu, loc.orig_sreg) != NULL);
-      DCHECK(GetLLVMBlock(cu, it->second) != NULL);
-      phi->addIncoming(GetLLVMValue(cu, loc.orig_sreg),
-                       GetLLVMBlock(cu, it->second));
+      it = mir_graph_->block_id_map_.find(incoming[i]);
+      DCHECK(it != mir_graph_->block_id_map_.end());
+      DCHECK(GetLLVMValue(loc.orig_sreg) != NULL);
+      DCHECK(GetLLVMBlock(it->second) != NULL);
+      phi->addIncoming(GetLLVMValue(loc.orig_sreg),
+                       GetLLVMBlock(it->second));
     }
-    DefineValueOnly(cu, phi, rl_dest.orig_sreg);
+    DefineValueOnly(phi, rl_dest.orig_sreg);
   }
 }
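
HandlePhiNodes pre-builds every LLVM PHI for a block before its instructions
are converted, and the wide-value convention noted in the comment above means
only the low-word location of a 64-bit pair materializes a PHI; the matching
high_word twin is skipped, since the bitcode side models the value as a single
64-bit SSA name. A toy illustration of that low-word-only rule:

    #include <cstdio>

    struct Loc { bool wide; bool high_word; int orig_sreg; };

    int main() {
      Loc phi_defs[] = {
        { true,  false, 4 },  // low word of a long held in v4/v5
        { true,  true,  5 },  // matching high word: no PHI of its own
        { false, false, 6 },  // plain int
      };
      for (const Loc& d : phi_defs) {
        if (d.wide && d.high_word) continue;  // mirrors the low-word-only rule
        std::printf("PHI for s%d as %s\n", d.orig_sreg, d.wide ? "i64" : "i32");
      }
      return 0;
    }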
 
 /* Extended MIR instructions like PHI */
-static void ConvertExtendedMIR(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+void MirConverter::ConvertExtendedMIR(BasicBlock* bb, MIR* mir,
-                               ::llvm::BasicBlock* llvm_bb)
+                                      ::llvm::BasicBlock* llvm_bb)
 {
 
   switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
     case kMirOpPhi: {
       // The llvm Phi node already emitted - just DefineValue() here.
-      RegLocation rl_dest = cu->reg_location[mir->ssa_rep->defs[0]];
+      RegLocation rl_dest = mir_graph_->reg_location_[mir->ssa_rep->defs[0]];
       if (!rl_dest.high_word) {
         // Only consider low word of pairs.
-        DCHECK(GetLLVMValue(cu, rl_dest.orig_sreg) != NULL);
-        ::llvm::Value* phi = GetLLVMValue(cu, rl_dest.orig_sreg);
-        if (1) SetVregOnValue(cu, phi, rl_dest.orig_sreg);
+        DCHECK(GetLLVMValue(rl_dest.orig_sreg) != NULL);
+        ::llvm::Value* phi = GetLLVMValue(rl_dest.orig_sreg);
+        SetVregOnValue(phi, rl_dest.orig_sreg);
       }
       break;
     }
@@ -1676,7 +1658,7 @@
     case kMirOpNop:
       if ((mir == bb->last_mir_insn) && (bb->taken == NULL) &&
           (bb->fall_through == NULL)) {
-        cu->irb->CreateUnreachable();
+        irb_->CreateUnreachable();
       }
       break;
 
@@ -1702,18 +1684,18 @@
 }
 
 /* Handle the content in each basic block */
-static bool BlockBitcodeConversion(CompilationUnit* cu, BasicBlock* bb)
+bool MirConverter::BlockBitcodeConversion(BasicBlock* bb)
 {
   if (bb->block_type == kDead) return false;
-  ::llvm::BasicBlock* llvm_bb = GetLLVMBlock(cu, bb->id);
+  ::llvm::BasicBlock* llvm_bb = GetLLVMBlock(bb->id);
   if (llvm_bb == NULL) {
     CHECK(bb->block_type == kExitBlock);
   } else {
-    cu->irb->SetInsertPoint(llvm_bb);
-    SetDexOffset(cu, bb->start_offset);
+    irb_->SetInsertPoint(llvm_bb);
+    SetDexOffset(bb->start_offset);
   }
 
-  if (cu->verbose) {
+  if (cu_->verbose) {
     LOG(INFO) << "................................";
     LOG(INFO) << "Block id " << bb->id;
     if (llvm_bb != NULL) {
@@ -1724,36 +1706,36 @@
   }
 
   if (bb->block_type == kEntryBlock) {
-    SetMethodInfo(cu);
+    SetMethodInfo();
 
     { // Allocate shadowframe.
       art::llvm::IntrinsicHelper::IntrinsicId id =
               art::llvm::IntrinsicHelper::AllocaShadowFrame;
-      ::llvm::Function* func = cu->intrinsic_helper->GetIntrinsicFunction(id);
-      ::llvm::Value* entries = cu->irb->getInt32(cu->num_dalvik_registers);
-      cu->irb->CreateCall(func, entries);
+      ::llvm::Function* func = intrinsic_helper_->GetIntrinsicFunction(id);
+      ::llvm::Value* entries = irb_->getInt32(cu_->num_dalvik_registers);
+      irb_->CreateCall(func, entries);
     }
 
     { // Store arguments to vregs.
-      uint16_t arg_reg = cu->num_regs;
+      uint16_t arg_reg = cu_->num_regs;
 
-      ::llvm::Function::arg_iterator arg_iter(cu->func->arg_begin());
-      ::llvm::Function::arg_iterator arg_end(cu->func->arg_end());
+      ::llvm::Function::arg_iterator arg_iter(func_->arg_begin());
+      ::llvm::Function::arg_iterator arg_end(func_->arg_end());
 
-      const char* shorty = cu->shorty;
+      const char* shorty = cu_->shorty;
       uint32_t shorty_size = strlen(shorty);
       CHECK_GE(shorty_size, 1u);
 
       ++arg_iter; // skip method object
 
-      if ((cu->access_flags & kAccStatic) == 0) {
-        SetVregOnValue(cu, arg_iter, arg_reg);
+      if ((cu_->access_flags & kAccStatic) == 0) {
+        SetVregOnValue(arg_iter, arg_reg);
         ++arg_iter;
         ++arg_reg;
       }
 
       for (uint32_t i = 1; i < shorty_size; ++i, ++arg_iter) {
-        SetVregOnValue(cu, arg_iter, arg_reg);
+        SetVregOnValue(arg_iter, arg_reg);
 
         ++arg_reg;
         if (shorty[i] == 'J' || shorty[i] == 'D') {
@@ -1779,11 +1761,11 @@
     return false;
   }
 
-  HandlePhiNodes(cu, bb, llvm_bb);
+  HandlePhiNodes(bb, llvm_bb);
 
   for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
 
-    SetDexOffset(cu, mir->offset);
+    SetDexOffset(mir->offset);
 
     int opcode = mir->dalvikInsn.opcode;
     Instruction::Format dalvik_format =
@@ -1800,38 +1782,38 @@
       work_half->meta.original_opcode = work_half->dalvikInsn.opcode;
       work_half->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
       if (bb->successor_block_list.block_list_type == kCatch) {
-        ::llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(
+        ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(
             art::llvm::IntrinsicHelper::CatchTargets);
         ::llvm::Value* switch_key =
-            cu->irb->CreateCall(intr, cu->irb->getInt32(mir->offset));
+            irb_->CreateCall(intr, irb_->getInt32(mir->offset));
         GrowableListIterator iter;
         GrowableListIteratorInit(&bb->successor_block_list.blocks, &iter);
         // New basic block to use for work half
         ::llvm::BasicBlock* work_bb =
-            ::llvm::BasicBlock::Create(*cu->context, "", cu->func);
+            ::llvm::BasicBlock::Create(*context_, "", func_);
         ::llvm::SwitchInst* sw =
-            cu->irb->CreateSwitch(switch_key, work_bb,
+            irb_->CreateSwitch(switch_key, work_bb,
                                      bb->successor_block_list.blocks.num_used);
         while (true) {
           SuccessorBlockInfo *successor_block_info =
               reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iter));
           if (successor_block_info == NULL) break;
           ::llvm::BasicBlock *target =
-              GetLLVMBlock(cu, successor_block_info->block->id);
+              GetLLVMBlock(successor_block_info->block->id);
           int type_index = successor_block_info->key;
-          sw->addCase(cu->irb->getInt32(type_index), target);
+          sw->addCase(irb_->getInt32(type_index), target);
         }
         llvm_bb = work_bb;
-        cu->irb->SetInsertPoint(llvm_bb);
+        irb_->SetInsertPoint(llvm_bb);
       }
     }
 
     if (opcode >= kMirOpFirst) {
-      ConvertExtendedMIR(cu, bb, mir, llvm_bb);
+      ConvertExtendedMIR(bb, mir, llvm_bb);
       continue;
     }
 
-    bool not_handled = ConvertMIRNode(cu, mir, bb, llvm_bb);
+    bool not_handled = ConvertMIRNode(mir, bb, llvm_bb);
     if (not_handled) {
       Instruction::Code dalvik_opcode = static_cast<Instruction::Code>(opcode);
       LOG(WARNING) << StringPrintf("%#06x: Op %#x (%s) / Fmt %d not handled",
@@ -1842,9 +1824,9 @@
   }
 
   if (bb->block_type == kEntryBlock) {
-    cu->entryTarget_bb = GetLLVMBlock(cu, bb->fall_through->id);
+    entry_target_bb_ = GetLLVMBlock(bb->fall_through->id);
   } else if ((bb->fall_through != NULL) && !bb->terminated_by_return) {
-    cu->irb->CreateBr(GetLLVMBlock(cu, bb->fall_through->id));
+    irb_->CreateBr(GetLLVMBlock(bb->fall_through->id));
   }
 
   return false;
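
The kCatch successor handling above splits a potentially-throwing instruction
at its throw point: the CatchTargets intrinsic produces a key, a switch routes
each matching catch-type index to its handler block, and the freshly created
work_bb (the switch's default destination) carries the instruction's work
half. A toy model of that dispatch shape:

    #include <cstdio>

    static int CatchTargetsKey() { return 1; }  // stand-in for the intrinsic call

    int main() {
      switch (CatchTargetsKey()) {
        case 0:  std::puts("handler block for type_index 0"); break;
        case 1:  std::puts("handler block for type_index 1"); break;
        default: std::puts("work_bb: run the instruction's work half"); break;
      }
      return 0;
    }
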
@@ -1872,74 +1854,74 @@
   return shorty_type;
 }
 
-static ::llvm::FunctionType* GetFunctionType(CompilationUnit* cu) {
+::llvm::FunctionType* MirConverter::GetFunctionType() {
 
   // Get return type
-  ::llvm::Type* ret_type = cu->irb->getJType(RemapShorty(cu->shorty[0]));
+  ::llvm::Type* ret_type = irb_->getJType(RemapShorty(cu_->shorty[0]));
 
   // Get argument type
   std::vector< ::llvm::Type*> args_type;
 
   // method object
-  args_type.push_back(cu->irb->getJMethodTy());
+  args_type.push_back(irb_->getJMethodTy());
 
   // Do we have a "this"?
-  if ((cu->access_flags & kAccStatic) == 0) {
-    args_type.push_back(cu->irb->getJObjectTy());
+  if ((cu_->access_flags & kAccStatic) == 0) {
+    args_type.push_back(irb_->getJObjectTy());
   }
 
-  for (uint32_t i = 1; i < strlen(cu->shorty); ++i) {
-    args_type.push_back(cu->irb->getJType(RemapShorty(cu->shorty[i])));
+  for (uint32_t i = 1; i < strlen(cu_->shorty); ++i) {
+    args_type.push_back(irb_->getJType(RemapShorty(cu_->shorty[i])));
   }
 
   return ::llvm::FunctionType::get(ret_type, args_type, false);
 }
 
-static bool CreateFunction(CompilationUnit* cu) {
-  ::llvm::FunctionType* func_type = GetFunctionType(cu);
+bool MirConverter::CreateFunction() {
+  ::llvm::FunctionType* func_type = GetFunctionType();
   if (func_type == NULL) {
     return false;
   }
 
-  cu->func = ::llvm::Function::Create(func_type,
+  func_ = ::llvm::Function::Create(func_type,
                                       ::llvm::Function::InternalLinkage,
-                                      cu->symbol, cu->module);
+                                      symbol_, module_);
 
-  ::llvm::Function::arg_iterator arg_iter(cu->func->arg_begin());
-  ::llvm::Function::arg_iterator arg_end(cu->func->arg_end());
+  ::llvm::Function::arg_iterator arg_iter(func_->arg_begin());
+  ::llvm::Function::arg_iterator arg_end(func_->arg_end());
 
   arg_iter->setName("method");
   ++arg_iter;
 
-  int start_sreg = cu->num_regs;
+  int start_sreg = cu_->num_regs;
 
   for (unsigned i = 0; arg_iter != arg_end; ++i, ++arg_iter) {
     arg_iter->setName(StringPrintf("v%i_0", start_sreg));
-    start_sreg += cu->reg_location[start_sreg].wide ? 2 : 1;
+    start_sreg += mir_graph_->reg_location_[start_sreg].wide ? 2 : 1;
   }
 
   return true;
 }
 
-static bool CreateLLVMBasicBlock(CompilationUnit* cu, BasicBlock* bb)
+bool MirConverter::CreateLLVMBasicBlock(BasicBlock* bb)
 {
   // Skip the exit block
   if ((bb->block_type == kDead) || (bb->block_type == kExitBlock)) {
-    cu->id_to_block_map.Put(bb->id, NULL);
+    id_to_block_map_.Put(bb->id, NULL);
   } else {
     int offset = bb->start_offset;
     bool entry_block = (bb->block_type == kEntryBlock);
     ::llvm::BasicBlock* llvm_bb =
-        ::llvm::BasicBlock::Create(*cu->context, entry_block ? "entry" :
+        ::llvm::BasicBlock::Create(*context_, entry_block ? "entry" :
                                  StringPrintf(kLabelFormat, bb->catch_entry ? kCatchBlock :
-                                              kNormalBlock, offset, bb->id), cu->func);
+                                              kNormalBlock, offset, bb->id), func_);
     if (entry_block) {
-        cu->entry_bb = llvm_bb;
-        cu->placeholder_bb =
-            ::llvm::BasicBlock::Create(*cu->context, "placeholder",
-                                     cu->func);
+        entry_bb_ = llvm_bb;
+        placeholder_bb_ =
+            ::llvm::BasicBlock::Create(*context_, "placeholder",
+                                     func_);
     }
-    cu->id_to_block_map.Put(bb->id, llvm_bb);
+    id_to_block_map_.Put(bb->id, llvm_bb);
   }
   return false;
 }
@@ -1953,18 +1935,18 @@
  *  o Iterate through the MIR a basic block at a time, setting arguments
  *    to recovered ssa name.
  */
-void MethodMIR2Bitcode(CompilationUnit* cu)
+void MirConverter::MethodMIR2Bitcode()
 {
-  InitIR(cu);
-  CompilerInitGrowableList(cu, &cu->llvm_values, cu->mir_graph->GetNumSSARegs());
+  InitIR();
+  CompilerInitGrowableList(cu_, &llvm_values_, mir_graph_->GetNumSSARegs());
 
   // Create the function
-  CreateFunction(cu);
+  CreateFunction();
 
   // Create an LLVM basic block for each MIR block in dfs preorder
-  PreOrderDfsIterator iter(cu->mir_graph.get(), false /* not iterative */);
+  PreOrderDfsIterator iter(mir_graph_, false /* not iterative */);
   for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
-    CreateLLVMBasicBlock(cu, bb);
+    CreateLLVMBasicBlock(bb);
   }
 
   /*
@@ -1972,31 +1954,31 @@
    * placeholders for all non-argument values (because we haven't seen
    * the definition yet).
    */
-  cu->irb->SetInsertPoint(cu->placeholder_bb);
-  ::llvm::Function::arg_iterator arg_iter(cu->func->arg_begin());
+  irb_->SetInsertPoint(placeholder_bb_);
+  ::llvm::Function::arg_iterator arg_iter(func_->arg_begin());
   arg_iter++;  /* Skip past method */
-  for (int i = 0; i < cu->mir_graph->GetNumSSARegs(); i++) {
+  for (int i = 0; i < mir_graph_->GetNumSSARegs(); i++) {
     ::llvm::Value* val;
-    RegLocation rl_temp = cu->reg_location[i];
-    if ((cu->mir_graph->SRegToVReg(i) < 0) || rl_temp.high_word) {
-      InsertGrowableList(cu, &cu->llvm_values, 0);
-    } else if ((i < cu->num_regs) ||
-               (i >= (cu->num_regs + cu->num_ins))) {
-      ::llvm::Constant* imm_value = cu->reg_location[i].wide ?
-         cu->irb->getJLong(0) : cu->irb->getJInt(0);
-      val = EmitConst(cu, imm_value, cu->reg_location[i]);
-      val->setName(cu->mir_graph->GetSSAString(i));
-      InsertGrowableList(cu, &cu->llvm_values, reinterpret_cast<uintptr_t>(val));
+    RegLocation rl_temp = mir_graph_->reg_location_[i];
+    if ((mir_graph_->SRegToVReg(i) < 0) || rl_temp.high_word) {
+      InsertGrowableList(cu_, &llvm_values_, 0);
+    } else if ((i < cu_->num_regs) ||
+               (i >= (cu_->num_regs + cu_->num_ins))) {
+      ::llvm::Constant* imm_value = mir_graph_->reg_location_[i].wide ?
+         irb_->getJLong(0) : irb_->getJInt(0);
+      val = EmitConst(imm_value, mir_graph_->reg_location_[i]);
+      val->setName(mir_graph_->GetSSAString(i));
+      InsertGrowableList(cu_, &llvm_values_, reinterpret_cast<uintptr_t>(val));
     } else {
       // Recover previously-created argument values
       ::llvm::Value* arg_val = arg_iter++;
-      InsertGrowableList(cu, &cu->llvm_values, reinterpret_cast<uintptr_t>(arg_val));
+      InsertGrowableList(cu_, &llvm_values_, reinterpret_cast<uintptr_t>(arg_val));
     }
   }
 
-  PreOrderDfsIterator iter2(cu->mir_graph.get(), false /* not iterative */);
+  PreOrderDfsIterator iter2(mir_graph_, false /* not iterative */);
   for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
-    BlockBitcodeConversion(cu, bb);
+    BlockBitcodeConversion(bb);
   }
 
   /*
@@ -2012,8 +1994,8 @@
    * If any definitions remain, we link the placeholder block into the
    * CFG.  Otherwise, it is deleted.
    */
-  for (::llvm::BasicBlock::iterator it = cu->placeholder_bb->begin(),
-       it_end = cu->placeholder_bb->end(); it != it_end;) {
+  for (::llvm::BasicBlock::iterator it = placeholder_bb_->begin(),
+       it_end = placeholder_bb_->end(); it != it_end;) {
     ::llvm::Instruction* inst = ::llvm::dyn_cast< ::llvm::Instruction>(it++);
     DCHECK(inst != NULL);
     ::llvm::Value* val = ::llvm::dyn_cast< ::llvm::Value>(inst);
@@ -2022,31 +2004,31 @@
       inst->eraseFromParent();
     }
   }
-  SetDexOffset(cu, 0);
-  if (cu->placeholder_bb->empty()) {
-    cu->placeholder_bb->eraseFromParent();
+  SetDexOffset(0);
+  if (placeholder_bb_->empty()) {
+    placeholder_bb_->eraseFromParent();
   } else {
-    cu->irb->SetInsertPoint(cu->placeholder_bb);
-    cu->irb->CreateBr(cu->entryTarget_bb);
-    cu->entryTarget_bb = cu->placeholder_bb;
+    irb_->SetInsertPoint(placeholder_bb_);
+    irb_->CreateBr(entry_target_bb_);
+    entry_target_bb_ = placeholder_bb_;
   }
-  cu->irb->SetInsertPoint(cu->entry_bb);
-  cu->irb->CreateBr(cu->entryTarget_bb);
+  irb_->SetInsertPoint(entry_bb_);
+  irb_->CreateBr(entry_target_bb_);
 
-  if (cu->enable_debug & (1 << kDebugVerifyBitcode)) {
-     if (::llvm::verifyFunction(*cu->func, ::llvm::PrintMessageAction)) {
+  if (cu_->enable_debug & (1 << kDebugVerifyBitcode)) {
+     if (::llvm::verifyFunction(*func_, ::llvm::PrintMessageAction)) {
        LOG(INFO) << "Bitcode verification FAILED for "
-                 << PrettyMethod(cu->method_idx, *cu->dex_file)
-                 << " of size " << cu->code_item->insns_size_in_code_units_;
-       cu->enable_debug |= (1 << kDebugDumpBitcodeFile);
+                 << PrettyMethod(cu_->method_idx, *cu_->dex_file)
+                 << " of size " << cu_->code_item->insns_size_in_code_units_;
+       cu_->enable_debug |= (1 << kDebugDumpBitcodeFile);
      }
   }
 
-  if (cu->enable_debug & (1 << kDebugDumpBitcodeFile)) {
+  if (cu_->enable_debug & (1 << kDebugDumpBitcodeFile)) {
     // Write bitcode to file
     std::string errmsg;
-    std::string fname(PrettyMethod(cu->method_idx, *cu->dex_file));
-    ReplaceSpecialChars(fname);
+    std::string fname(PrettyMethod(cu_->method_idx, *cu_->dex_file));
+    mir_graph_->ReplaceSpecialChars(fname);
     // TODO: make configurable; change naming mechanism to avoid fname length issues.
     fname = StringPrintf("/sdcard/Bitcode/%s.bc", fname.c_str());
 
@@ -2063,9 +2045,14 @@
       LOG(ERROR) << "Failed to create bitcode output file: " << errmsg;
     }
 
-    ::llvm::WriteBitcodeToFile(cu->module, out_file->os());
+    ::llvm::WriteBitcodeToFile(module_, out_file->os());
     out_file->keep();
   }
 }
 
+Backend* PortableCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+                               llvm::LlvmCompilationUnit* const llvm_compilation_unit) {
+  return new MirConverter(cu, mir_graph, llvm_compilation_unit);
+}
+
 }  // namespace art
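
For orientation, here is a minimal sketch of how a driver might consume the
new Backend seam introduced above.  The helper name and ownership scheme are
hypothetical and not part of this CL; only the PortableCodeGenerator()
signature and the Backend virtuals come from the diff.

    #include "compiler/dex/portable/mir_to_gbc.h"

    namespace art {

    // Hypothetical driver-side helper: callers see only the abstract
    // Backend, never the concrete MirConverter.
    static CompiledMethod* CompileWithPortableBackend(
        CompilationUnit* cu, MIRGraph* mir_graph,
        llvm::LlvmCompilationUnit* llvm_compilation_unit) {
      Backend* backend = PortableCodeGenerator(cu, mir_graph, llvm_compilation_unit);
      backend->Materialize();                        // Runs MethodMIR2Bitcode().
      CompiledMethod* result = backend->GetCompiledMethod();  // NULL for portable, for now.
      delete backend;                                // Backend has a virtual dtor.
      return result;
    }

    }  // namespace art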
diff --git a/src/compiler/dex/portable/mir_to_gbc.h b/src/compiler/dex/portable/mir_to_gbc.h
index 48faf75..eb7069c 100644
--- a/src/compiler/dex/portable/mir_to_gbc.h
+++ b/src/compiler/dex/portable/mir_to_gbc.h
@@ -17,9 +17,176 @@
 #ifndef ART_SRC_COMPILER_DEX_PORTABLE_MIRTOGBC_H_
 #define ART_SRC_COMPILER_DEX_PORTABLE_MIRTOGBC_H_
 
+#include "invoke_type.h"
+#include "compiled_method.h"
+#include "compiler/dex/compiler_enums.h"
+#include "compiler/dex/compiler_ir.h"
+#include "compiler/dex/compiler_utility.h"
+#include "compiler/dex/backend.h"
+#include "compiler/llvm/llvm_compilation_unit.h"
+#include "safe_map.h"
+
 namespace art {
 
-void MethodMIR2Bitcode(CompilationUnit* cu);
+struct BasicBlock;
+struct CallInfo;
+struct CompilationUnit;
+struct MIR;
+struct RegLocation;
+struct RegisterInfo;
+class MIRGraph;
+
+// Target-specific initialization.
+Backend* PortableCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+                               llvm::LlvmCompilationUnit* const llvm_compilation_unit);
+
+class MirConverter : public Backend {
+
+  public:
+    // TODO: flesh out and integrate into new world order.
+    MirConverter(CompilationUnit* cu, MIRGraph* mir_graph,
+                 llvm::LlvmCompilationUnit* llvm_compilation_unit)
+      : cu_(cu),
+        mir_graph_(mir_graph),
+        llvm_compilation_unit_(llvm_compilation_unit),
+        llvm_info_(llvm_compilation_unit->GetQuickContext()),
+        symbol_(llvm_compilation_unit->GetDexCompilationUnit()->GetSymbol()),
+        context_(NULL),
+        module_(NULL),
+        func_(NULL),
+        intrinsic_helper_(NULL),
+        irb_(NULL),
+        placeholder_bb_(NULL),
+        entry_bb_(NULL),
+        entry_target_bb_(NULL),
+        temp_name_(0),
+        current_dalvik_offset_(0) {
+      if (kIsDebugBuild) {
+        cu->enable_debug |= (1 << kDebugVerifyBitcode);
+      }
+    }
+
+    void Materialize() {
+      MethodMIR2Bitcode();
+    }
+
+    CompiledMethod* GetCompiledMethod() {
+      return NULL;
+    }
+
+  private:
+    ::llvm::BasicBlock* GetLLVMBlock(int id);
+    ::llvm::Value* GetLLVMValue(int s_reg);
+    void SetVregOnValue(::llvm::Value* val, int s_reg);
+    void DefineValueOnly(::llvm::Value* val, int s_reg);
+    void DefineValue(::llvm::Value* val, int s_reg);
+    ::llvm::Type* LlvmTypeFromLocRec(RegLocation loc);
+    void InitIR();
+    ::llvm::BasicBlock* FindCaseTarget(uint32_t vaddr);
+    void ConvertPackedSwitch(BasicBlock* bb, int32_t table_offset,
+                             RegLocation rl_src);
+    void ConvertSparseSwitch(BasicBlock* bb, int32_t table_offset,
+                             RegLocation rl_src);
+    void ConvertSget(int32_t field_index,
+                     art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_dest);
+    void ConvertSput(int32_t field_index,
+                     art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_src);
+    void ConvertFillArrayData(int32_t offset, RegLocation rl_array);
+    ::llvm::Value* EmitConst(::llvm::ArrayRef< ::llvm::Value*> src,
+                             RegLocation loc);
+    void EmitPopShadowFrame();
+    ::llvm::Value* EmitCopy(::llvm::ArrayRef< ::llvm::Value*> src,
+                            RegLocation loc);
+    void ConvertMoveException(RegLocation rl_dest);
+    void ConvertThrow(RegLocation rl_src);
+    void ConvertMonitorEnterExit(int opt_flags,
+                                 art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_src);
+    void ConvertArrayLength(int opt_flags, RegLocation rl_dest,
+                            RegLocation rl_src);
+    void EmitSuspendCheck();
+    ::llvm::Value* ConvertCompare(ConditionCode cc,
+                                  ::llvm::Value* src1, ::llvm::Value* src2);
+    void ConvertCompareAndBranch(BasicBlock* bb, MIR* mir, ConditionCode cc,
+                                 RegLocation rl_src1, RegLocation rl_src2);
+    void ConvertCompareZeroAndBranch(BasicBlock* bb, MIR* mir, ConditionCode cc,
+                                     RegLocation rl_src1);
+    ::llvm::Value* GenDivModOp(bool is_div, bool is_long, ::llvm::Value* src1,
+                               ::llvm::Value* src2);
+    ::llvm::Value* GenArithOp(OpKind op, bool is_long, ::llvm::Value* src1,
+                              ::llvm::Value* src2);
+    void ConvertFPArithOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
+                          RegLocation rl_src2);
+    void ConvertShift(art::llvm::IntrinsicHelper::IntrinsicId id,
+                      RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    void ConvertShiftLit(art::llvm::IntrinsicHelper::IntrinsicId id,
+                         RegLocation rl_dest, RegLocation rl_src, int shift_amount);
+    void ConvertArithOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
+                        RegLocation rl_src2);
+    void ConvertArithOpLit(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
+                           int32_t imm);
+    void ConvertInvoke(BasicBlock* bb, MIR* mir, InvokeType invoke_type,
+                       bool is_range, bool is_filled_new_array);
+    void ConvertConstObject(uint32_t idx,
+                            art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_dest);
+    void ConvertCheckCast(uint32_t type_idx, RegLocation rl_src);
+    void ConvertNewInstance(uint32_t type_idx, RegLocation rl_dest);
+    void ConvertNewArray(uint32_t type_idx, RegLocation rl_dest,
+                         RegLocation rl_src);
+    void ConvertAget(int opt_flags, art::llvm::IntrinsicHelper::IntrinsicId id,
+                     RegLocation rl_dest, RegLocation rl_array, RegLocation rl_index);
+    void ConvertAput(int opt_flags, art::llvm::IntrinsicHelper::IntrinsicId id,
+                     RegLocation rl_src, RegLocation rl_array, RegLocation rl_index);
+    void ConvertIget(int opt_flags, art::llvm::IntrinsicHelper::IntrinsicId id,
+                     RegLocation rl_dest, RegLocation rl_obj, int field_index);
+    void ConvertIput(int opt_flags, art::llvm::IntrinsicHelper::IntrinsicId id,
+                     RegLocation rl_src, RegLocation rl_obj, int field_index);
+    void ConvertInstanceOf(uint32_t type_idx, RegLocation rl_dest,
+                           RegLocation rl_src);
+    void ConvertIntToLong(RegLocation rl_dest, RegLocation rl_src);
+    void ConvertLongToInt(RegLocation rl_dest, RegLocation rl_src);
+    void ConvertFloatToDouble(RegLocation rl_dest, RegLocation rl_src);
+    void ConvertDoubleToFloat(RegLocation rl_dest, RegLocation rl_src);
+    void ConvertWideComparison(art::llvm::IntrinsicHelper::IntrinsicId id,
+                               RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    void ConvertIntNarrowing(RegLocation rl_dest, RegLocation rl_src,
+                             art::llvm::IntrinsicHelper::IntrinsicId id);
+    void ConvertNeg(RegLocation rl_dest, RegLocation rl_src);
+    void ConvertIntToFP(::llvm::Type* ty, RegLocation rl_dest, RegLocation rl_src);
+    void ConvertFPToInt(art::llvm::IntrinsicHelper::IntrinsicId id,
+                        RegLocation rl_dest, RegLocation rl_src);
+    void ConvertNegFP(RegLocation rl_dest, RegLocation rl_src);
+    void ConvertNot(RegLocation rl_dest, RegLocation rl_src);
+    void EmitConstructorBarrier();
+    bool ConvertMIRNode(MIR* mir, BasicBlock* bb, ::llvm::BasicBlock* llvm_bb);
+    void SetDexOffset(int32_t offset);
+    void SetMethodInfo();
+    void HandlePhiNodes(BasicBlock* bb, ::llvm::BasicBlock* llvm_bb);
+    void ConvertExtendedMIR(BasicBlock* bb, MIR* mir, ::llvm::BasicBlock* llvm_bb);
+    bool BlockBitcodeConversion(BasicBlock* bb);
+    ::llvm::FunctionType* GetFunctionType();
+    bool CreateFunction();
+    bool CreateLLVMBasicBlock(BasicBlock* bb);
+    void MethodMIR2Bitcode();
+
+    CompilationUnit* cu_;
+    MIRGraph* mir_graph_;
+    llvm::LlvmCompilationUnit* const llvm_compilation_unit_;
+    LLVMInfo* llvm_info_;
+    std::string symbol_;
+    ::llvm::LLVMContext* context_;
+    ::llvm::Module* module_;
+    ::llvm::Function* func_;
+    art::llvm::IntrinsicHelper* intrinsic_helper_;
+    art::llvm::IRBuilder* irb_;
+    ::llvm::BasicBlock* placeholder_bb_;
+    ::llvm::BasicBlock* entry_bb_;
+    ::llvm::BasicBlock* entry_target_bb_;
+    std::string bitcode_filename_;
+    GrowableList llvm_values_;
+    int32_t temp_name_;
+    SafeMap<int32_t, ::llvm::BasicBlock*> id_to_block_map_;  // block id -> llvm bb.
+    int current_dalvik_offset_;
+};  // Class MirConverter
 
 }  // namespace art
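
The header above captures the shape of the whole CL in one place: per-method
state migrates off CompilationUnit and into the backend object, and the cu
parameter disappears from every signature.  A minimal before/after sketch of
the pattern, using SetDexOffset as the example (the bodies are abbreviated to
the one assignment visible in this diff):

    // Before this CL: a free function, reaching all state through cu.
    static void SetDexOffset(CompilationUnit* cu, int32_t offset) {
      cu->current_dalvik_offset = offset;  // state lives on the dumping ground
    }

    // After this CL: a member function on the backend; the state it touches
    // is now a field of the same object (trailing-underscore convention).
    void MirConverter::SetDexOffset(int32_t offset) {
      current_dalvik_offset_ = offset;     // see the field list above
    }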
 
diff --git a/src/compiler/dex/quick/arm/assemble_arm.cc b/src/compiler/dex/quick/arm/assemble_arm.cc
index 253d5e9..23a87dc 100644
--- a/src/compiler/dex/quick/arm/assemble_arm.cc
+++ b/src/compiler/dex/quick/arm/assemble_arm.cc
@@ -16,7 +16,6 @@
 
 #include "arm_lir.h"
 #include "codegen_arm.h"
-#include "compiler/dex/quick/codegen_util.h"
 
 namespace art {
 
@@ -76,7 +75,7 @@
  *  [!] escape.  To insert "!", use "!!"
  */
 /* NOTE: must be kept in sync with enum ArmOpcode from LIR.h */
-const ArmEncodingMap ArmCodegen::EncodingMap[kArmLast] = {
+const ArmEncodingMap ArmMir2Lir::EncodingMap[kArmLast] = {
     ENCODING_MAP(kArm16BitData,    0x0000,
                  kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_UNARY_OP, "data", "0x!0h(!0d)", 2),
@@ -1002,18 +1001,18 @@
  * discover that pc-relative displacements may not fit the selected
  * instruction.
  */
-AssemblerStatus ArmCodegen::AssembleInstructions(CompilationUnit* cu, uintptr_t start_addr)
+AssemblerStatus ArmMir2Lir::AssembleInstructions(uintptr_t start_addr)
 {
   LIR* lir;
   AssemblerStatus res = kSuccess;  // Assume success
 
-  for (lir = cu->first_lir_insn; lir != NULL; lir = NEXT_LIR(lir)) {
+  for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
 
     if (lir->opcode < 0) {
       /* 1 means padding is needed */
       if ((lir->opcode == kPseudoPseudoAlign4) && (lir->operands[0] == 1)) {
-        cu->code_buffer.push_back(PADDING_MOV_R5_R5 & 0xFF);
-        cu->code_buffer.push_back((PADDING_MOV_R5_R5 >> 8) & 0xFF);
+        code_buffer_.push_back(PADDING_MOV_R5_R5 & 0xFF);
+        code_buffer_.push_back((PADDING_MOV_R5_R5 >> 8) & 0xFF);
       }
       continue;
     }
@@ -1073,7 +1072,7 @@
               ?  lir->operands[0] : rARM_LR;
 
           // Add new Adr to generate the address.
-          LIR* new_adr = RawLIR(cu, lir->dalvik_offset, kThumb2Adr,
+          LIR* new_adr = RawLIR(lir->dalvik_offset, kThumb2Adr,
                      base_reg, 0, 0, 0, 0, lir->target);
           InsertLIRBefore(lir, new_adr);
 
@@ -1091,7 +1090,7 @@
             lir->operands[2] = 0;
             lir->operands[1] = base_reg;
           }
-          SetupResourceMasks(cu, lir);
+          SetupResourceMasks(lir);
           res = kRetryAll;
         } else {
           if ((lir->opcode == kThumb2Vldrs) ||
@@ -1114,7 +1113,7 @@
            * Make new branch instruction and insert after
            */
           LIR* new_inst =
-            RawLIR(cu, lir->dalvik_offset, kThumbBCond, 0,
+            RawLIR(lir->dalvik_offset, kThumbBCond, 0,
                    (lir->opcode == kThumb2Cbz) ? kArmCondEq : kArmCondNe,
                    0, 0, 0, lir->target);
           InsertLIRAfter(lir, new_inst);
@@ -1123,7 +1122,7 @@
           /* operand[0] is src1 in both cb[n]z & CmpRI8 */
           lir->operands[1] = 0;
           lir->target = 0;
-          SetupResourceMasks(cu, lir);
+          SetupResourceMasks(lir);
           res = kRetryAll;
         } else {
           lir->operands[1] = delta >> 1;
@@ -1148,7 +1147,7 @@
             }
           }
           lir->operands[0] = reg;
-          SetupResourceMasks(cu, lir);
+          SetupResourceMasks(lir);
           res = kRetryAll;
         }
       } else if (lir->opcode == kThumbBCond || lir->opcode == kThumb2BCond) {
@@ -1160,7 +1159,7 @@
         delta = target - pc;
         if ((lir->opcode == kThumbBCond) && (delta > 254 || delta < -256)) {
           lir->opcode = kThumb2BCond;
-          SetupResourceMasks(cu, lir);
+          SetupResourceMasks(lir);
           res = kRetryAll;
         }
         lir->operands[0] = delta >> 1;
@@ -1170,7 +1169,7 @@
         uintptr_t target = target_lir->offset;
         int delta = target - pc;
         lir->operands[0] = delta >> 1;
-        if (!(cu->disable_opt & (1 << kSafeOptimizations)) &&
+        if (!(cu_->disable_opt & (1 << kSafeOptimizations)) &&
           lir->operands[0] == 0) {  // Useless branch
           lir->flags.is_nop = true;
           res = kRetryAll;
@@ -1184,11 +1183,11 @@
           // Convert to Thumb2BCond w/ kArmCondAl
           lir->opcode = kThumb2BUncond;
           lir->operands[0] = 0;
-          SetupResourceMasks(cu, lir);
+          SetupResourceMasks(lir);
           res = kRetryAll;
         } else {
           lir->operands[0] = delta >> 1;
-          if (!(cu->disable_opt & (1 << kSafeOptimizations)) &&
+          if (!(cu_->disable_opt & (1 << kSafeOptimizations)) &&
             lir->operands[0] == -1) {  // Useless branch
             lir->flags.is_nop = true;
             res = kRetryAll;
@@ -1232,12 +1231,12 @@
           // convert to ldimm16l, ldimm16h, add tgt, pc, operands[0]
           // TUNING: if this case fires often, it can be improved.  Not expected to be common.
           LIR *new_mov16L =
-              RawLIR(cu, lir->dalvik_offset, kThumb2MovImm16LST,
+              RawLIR(lir->dalvik_offset, kThumb2MovImm16LST,
                      lir->operands[0], 0, reinterpret_cast<uintptr_t>(lir),
                      reinterpret_cast<uintptr_t>(tab_rec), 0, lir->target);
           InsertLIRBefore(lir, new_mov16L);
           LIR *new_mov16H =
-              RawLIR(cu, lir->dalvik_offset, kThumb2MovImm16HST,
+              RawLIR(lir->dalvik_offset, kThumb2MovImm16HST,
                      lir->operands[0], 0, reinterpret_cast<uintptr_t>(lir),
                      reinterpret_cast<uintptr_t>(tab_rec), 0, lir->target);
           InsertLIRBefore(lir, new_mov16H);
@@ -1247,7 +1246,7 @@
             lir->opcode = kThumbAddRRHH;
           }
           lir->operands[1] = rARM_PC;
-          SetupResourceMasks(cu, lir);
+          SetupResourceMasks(lir);
           res = kRetryAll;
         }
       } else if (lir->opcode == kThumb2MovImm16LST) {
@@ -1380,16 +1379,16 @@
       }
     }
     if (encoder->size == 4) {
-      cu->code_buffer.push_back((bits >> 16) & 0xff);
-      cu->code_buffer.push_back((bits >> 24) & 0xff);
+      code_buffer_.push_back((bits >> 16) & 0xff);
+      code_buffer_.push_back((bits >> 24) & 0xff);
     }
-    cu->code_buffer.push_back(bits & 0xff);
-    cu->code_buffer.push_back((bits >> 8) & 0xff);
+    code_buffer_.push_back(bits & 0xff);
+    code_buffer_.push_back((bits >> 8) & 0xff);
   }
   return res;
 }
 
-int ArmCodegen::GetInsnSize(LIR* lir)
+int ArmMir2Lir::GetInsnSize(LIR* lir)
 {
   return EncodingMap[lir->opcode].size;
 }
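
AssembleInstructions() above returns kRetryAll whenever it widens an
instruction or inserts a new one, because every downstream pc-relative offset
is then stale.  A sketch of the fixed-point loop a caller runs in response;
the caller name is an assumption, visibility is glossed over, and the
per-pass reassignment of lir->offset is elided:

    // Hypothetical caller (not in this diff): re-run encoding until no
    // instruction changes size.
    static void AssembleUntilFixedPoint(ArmMir2Lir* cg, uintptr_t start_addr) {
      while (cg->AssembleInstructions(start_addr) == kRetryAll) {
        // A kRetryAll pass may have widened or inserted instructions, so the
        // bytes emitted so far are stale; start the buffer over and re-run.
        cg->code_buffer_.clear();
        // ...real code also recomputes lir->offset for every LIR here...
      }
    }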
diff --git a/src/compiler/dex/quick/arm/call_arm.cc b/src/compiler/dex/quick/arm/call_arm.cc
index a201fd8..bb46e1f 100644
--- a/src/compiler/dex/quick/arm/call_arm.cc
+++ b/src/compiler/dex/quick/arm/call_arm.cc
@@ -18,18 +18,16 @@
 
 #include "arm_lir.h"
 #include "codegen_arm.h"
-#include "compiler/dex/quick/codegen_util.h"
-#include "compiler/dex/quick/ralloc_util.h"
 #include "oat/runtime/oat_support_entrypoints.h"
 
 namespace art {
 
 
 /* Return the position of an ssa name within the argument list */
-static int InPosition(CompilationUnit* cu, int s_reg)
+int ArmMir2Lir::InPosition(int s_reg)
 {
-  int v_reg = cu->mir_graph->SRegToVReg(s_reg);
-  return v_reg - cu->num_regs;
+  int v_reg = mir_graph_->SRegToVReg(s_reg);
+  return v_reg - cu_->num_regs;
 }
 
 /*
@@ -37,9 +35,9 @@
  * there.  NOTE: all live arg registers must be locked prior to this call
  * to avoid having them allocated as a temp by downstream utilities.
  */
-RegLocation ArmCodegen::ArgLoc(CompilationUnit* cu, RegLocation loc)
+RegLocation ArmMir2Lir::ArgLoc(RegLocation loc)
 {
-  int arg_num = InPosition(cu, loc.s_reg_low);
+  int arg_num = InPosition(loc.s_reg_low);
   if (loc.wide) {
     if (arg_num == 2) {
       // Bad case - half in register, half in frame.  Just punt
@@ -67,16 +65,15 @@
  * the frame, we can't use the normal LoadValue() because it assumed
  * a proper frame - and we're frameless.
  */
-static RegLocation LoadArg(CompilationUnit* cu, RegLocation loc)
+RegLocation ArmMir2Lir::LoadArg(RegLocation loc)
 {
-  Codegen* cg = cu->cg.get();
   if (loc.location == kLocDalvikFrame) {
-    int start = (InPosition(cu, loc.s_reg_low) + 1) * sizeof(uint32_t);
-    loc.low_reg = AllocTemp(cu);
-    cg->LoadWordDisp(cu, rARM_SP, start, loc.low_reg);
+    int start = (InPosition(loc.s_reg_low) + 1) * sizeof(uint32_t);
+    loc.low_reg = AllocTemp();
+    LoadWordDisp(rARM_SP, start, loc.low_reg);
     if (loc.wide) {
-      loc.high_reg = AllocTemp(cu);
-      cg->LoadWordDisp(cu, rARM_SP, start + sizeof(uint32_t), loc.high_reg);
+      loc.high_reg = AllocTemp();
+      LoadWordDisp(rARM_SP, start + sizeof(uint32_t), loc.high_reg);
     }
     loc.location = kLocPhysReg;
   }
@@ -84,21 +81,22 @@
 }
 
 /* Lock any referenced arguments that arrive in registers */
-static void LockLiveArgs(CompilationUnit* cu, MIR* mir)
+void ArmMir2Lir::LockLiveArgs(MIR* mir)
 {
-  int first_in = cu->num_regs;
+  int first_in = cu_->num_regs;
   const int num_arg_regs = 3;  // TODO: generalize & move to RegUtil.cc
   for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
-    int v_reg = cu->mir_graph->SRegToVReg(mir->ssa_rep->uses[i]);
+    int v_reg = mir_graph_->SRegToVReg(mir->ssa_rep->uses[i]);
     int InPosition = v_reg - first_in;
     if (InPosition < num_arg_regs) {
-      LockTemp(cu, rARM_ARG1 + InPosition);
+      LockTemp(rARM_ARG1 + InPosition);
     }
   }
 }
 
 /* Find the next MIR, which may be in a following basic block */
-static MIR* GetNextMir(CompilationUnit* cu, BasicBlock** p_bb, MIR* mir)
+// TODO: should this be a utility in mir_graph?
+MIR* ArmMir2Lir::GetNextMir(BasicBlock** p_bb, MIR* mir)
 {
   BasicBlock* bb = *p_bb;
   MIR* orig_mir = mir;
@@ -124,105 +122,100 @@
 
 /* Used for the "verbose" listing */
 // TODO: move to common code
-void ArmCodegen::GenPrintLabel(CompilationUnit *cu, MIR* mir)
+void ArmMir2Lir::GenPrintLabel(MIR* mir)
 {
   /* Mark the beginning of a Dalvik instruction for line tracking */
-  char* inst_str = cu->verbose ?
-     GetDalvikDisassembly(cu, mir) : NULL;
-  MarkBoundary(cu, mir->offset, inst_str);
+  char* inst_str = cu_->verbose ?
+     mir_graph_->GetDalvikDisassembly(mir) : NULL;
+  MarkBoundary(mir->offset, inst_str);
 }
 
-static MIR* SpecialIGet(CompilationUnit* cu, BasicBlock** bb, MIR* mir,
-                        OpSize size, bool long_or_double, bool is_object)
+MIR* ArmMir2Lir::SpecialIGet(BasicBlock** bb, MIR* mir,
+                             OpSize size, bool long_or_double, bool is_object)
 {
-  Codegen* cg = cu->cg.get();
   int field_offset;
   bool is_volatile;
   uint32_t field_idx = mir->dalvikInsn.vC;
-  bool fast_path = FastInstance(cu, field_idx, field_offset, is_volatile, false);
+  bool fast_path = FastInstance(field_idx, field_offset, is_volatile, false);
   if (!fast_path || !(mir->optimization_flags & MIR_IGNORE_NULL_CHECK)) {
     return NULL;
   }
-  RegLocation rl_obj = GetSrc(cu, mir, 0);
-  LockLiveArgs(cu, mir);
-  rl_obj = ArmCodegen::ArgLoc(cu, rl_obj);
+  RegLocation rl_obj = mir_graph_->GetSrc(mir, 0);
+  LockLiveArgs(mir);
+  rl_obj = ArmMir2Lir::ArgLoc(rl_obj);
   RegLocation rl_dest;
   if (long_or_double) {
-    rl_dest = GetReturnWide(cu, false);
+    rl_dest = GetReturnWide(false);
   } else {
-    rl_dest = GetReturn(cu, false);
+    rl_dest = GetReturn(false);
   }
   // Point of no return - no aborts after this
-  ArmCodegen::GenPrintLabel(cu, mir);
-  rl_obj = LoadArg(cu, rl_obj);
-  cg->GenIGet(cu, field_idx, mir->optimization_flags, size, rl_dest, rl_obj,
-              long_or_double, is_object);
-  return GetNextMir(cu, bb, mir);
+  ArmMir2Lir::GenPrintLabel(mir);
+  rl_obj = LoadArg(rl_obj);
+  GenIGet(field_idx, mir->optimization_flags, size, rl_dest, rl_obj, long_or_double, is_object);
+  return GetNextMir(bb, mir);
 }
 
-static MIR* SpecialIPut(CompilationUnit* cu, BasicBlock** bb, MIR* mir,
-                        OpSize size, bool long_or_double, bool is_object)
+MIR* ArmMir2Lir::SpecialIPut(BasicBlock** bb, MIR* mir,
+                             OpSize size, bool long_or_double, bool is_object)
 {
-  Codegen* cg = cu->cg.get();
   int field_offset;
   bool is_volatile;
   uint32_t field_idx = mir->dalvikInsn.vC;
-  bool fast_path = FastInstance(cu, field_idx, field_offset, is_volatile, false);
+  bool fast_path = FastInstance(field_idx, field_offset, is_volatile, false);
   if (!fast_path || !(mir->optimization_flags & MIR_IGNORE_NULL_CHECK)) {
     return NULL;
   }
   RegLocation rl_src;
   RegLocation rl_obj;
-  LockLiveArgs(cu, mir);
+  LockLiveArgs(mir);
   if (long_or_double) {
-    rl_src = GetSrcWide(cu, mir, 0);
-    rl_obj = GetSrc(cu, mir, 2);
+    rl_src = mir_graph_->GetSrcWide(mir, 0);
+    rl_obj = mir_graph_->GetSrc(mir, 2);
   } else {
-    rl_src = GetSrc(cu, mir, 0);
-    rl_obj = GetSrc(cu, mir, 1);
+    rl_src = mir_graph_->GetSrc(mir, 0);
+    rl_obj = mir_graph_->GetSrc(mir, 1);
   }
-  rl_src = ArmCodegen::ArgLoc(cu, rl_src);
-  rl_obj = ArmCodegen::ArgLoc(cu, rl_obj);
+  rl_src = ArmMir2Lir::ArgLoc(rl_src);
+  rl_obj = ArmMir2Lir::ArgLoc(rl_obj);
   // Reject if source is split across registers & frame
   if (rl_obj.location == kLocInvalid) {
-    ResetRegPool(cu);
+    ResetRegPool();
     return NULL;
   }
   // Point of no return - no aborts after this
-  ArmCodegen::GenPrintLabel(cu, mir);
-  rl_obj = LoadArg(cu, rl_obj);
-  rl_src = LoadArg(cu, rl_src);
-  cg->GenIPut(cu, field_idx, mir->optimization_flags, size, rl_src, rl_obj,
-              long_or_double, is_object);
-  return GetNextMir(cu, bb, mir);
+  ArmMir2Lir::GenPrintLabel(mir);
+  rl_obj = LoadArg(rl_obj);
+  rl_src = LoadArg(rl_src);
+  GenIPut(field_idx, mir->optimization_flags, size, rl_src, rl_obj, long_or_double, is_object);
+  return GetNextMir(bb, mir);
 }
 
-static MIR* SpecialIdentity(CompilationUnit* cu, MIR* mir)
+MIR* ArmMir2Lir::SpecialIdentity(MIR* mir)
 {
-  Codegen* cg = cu->cg.get();
   RegLocation rl_src;
   RegLocation rl_dest;
   bool wide = (mir->ssa_rep->num_uses == 2);
   if (wide) {
-    rl_src = GetSrcWide(cu, mir, 0);
-    rl_dest = GetReturnWide(cu, false);
+    rl_src = mir_graph_->GetSrcWide(mir, 0);
+    rl_dest = GetReturnWide(false);
   } else {
-    rl_src = GetSrc(cu, mir, 0);
-    rl_dest = GetReturn(cu, false);
+    rl_src = mir_graph_->GetSrc(mir, 0);
+    rl_dest = GetReturn(false);
   }
-  LockLiveArgs(cu, mir);
-  rl_src = ArmCodegen::ArgLoc(cu, rl_src);
+  LockLiveArgs(mir);
+  rl_src = ArmMir2Lir::ArgLoc(rl_src);
   if (rl_src.location == kLocInvalid) {
-    ResetRegPool(cu);
+    ResetRegPool();
     return NULL;
   }
   // Point of no return - no aborts after this
-  ArmCodegen::GenPrintLabel(cu, mir);
-  rl_src = LoadArg(cu, rl_src);
+  ArmMir2Lir::GenPrintLabel(mir);
+  rl_src = LoadArg(rl_src);
   if (wide) {
-    cg->StoreValueWide(cu, rl_dest, rl_src);
+    StoreValueWide(rl_dest, rl_src);
   } else {
-    cg->StoreValue(cu, rl_dest, rl_src);
+    StoreValue(rl_dest, rl_src);
   }
   return mir;
 }
@@ -230,10 +223,10 @@
 /*
  * Special-case code generation for simple non-throwing leaf methods.
  */
-void ArmCodegen::GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+void ArmMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir,
                                 SpecialCaseHandler special_case)
 {
-   cu->current_dalvik_offset = mir->offset;
+   current_dalvik_offset_ = mir->offset;
    MIR* next_mir = NULL;
    switch (special_case) {
      case kNullMethod:
@@ -241,67 +234,67 @@
        next_mir = mir;
        break;
      case kConstFunction:
-       ArmCodegen::GenPrintLabel(cu, mir);
-       LoadConstant(cu, rARM_RET0, mir->dalvikInsn.vB);
-       next_mir = GetNextMir(cu, &bb, mir);
+       ArmMir2Lir::GenPrintLabel(mir);
+       LoadConstant(rARM_RET0, mir->dalvikInsn.vB);
+       next_mir = GetNextMir(&bb, mir);
        break;
      case kIGet:
-       next_mir = SpecialIGet(cu, &bb, mir, kWord, false, false);
+       next_mir = SpecialIGet(&bb, mir, kWord, false, false);
        break;
      case kIGetBoolean:
      case kIGetByte:
-       next_mir = SpecialIGet(cu, &bb, mir, kUnsignedByte, false, false);
+       next_mir = SpecialIGet(&bb, mir, kUnsignedByte, false, false);
        break;
      case kIGetObject:
-       next_mir = SpecialIGet(cu, &bb, mir, kWord, false, true);
+       next_mir = SpecialIGet(&bb, mir, kWord, false, true);
        break;
      case kIGetChar:
-       next_mir = SpecialIGet(cu, &bb, mir, kUnsignedHalf, false, false);
+       next_mir = SpecialIGet(&bb, mir, kUnsignedHalf, false, false);
        break;
      case kIGetShort:
-       next_mir = SpecialIGet(cu, &bb, mir, kSignedHalf, false, false);
+       next_mir = SpecialIGet(&bb, mir, kSignedHalf, false, false);
        break;
      case kIGetWide:
-       next_mir = SpecialIGet(cu, &bb, mir, kLong, true, false);
+       next_mir = SpecialIGet(&bb, mir, kLong, true, false);
        break;
      case kIPut:
-       next_mir = SpecialIPut(cu, &bb, mir, kWord, false, false);
+       next_mir = SpecialIPut(&bb, mir, kWord, false, false);
        break;
      case kIPutBoolean:
      case kIPutByte:
-       next_mir = SpecialIPut(cu, &bb, mir, kUnsignedByte, false, false);
+       next_mir = SpecialIPut(&bb, mir, kUnsignedByte, false, false);
        break;
      case kIPutObject:
-       next_mir = SpecialIPut(cu, &bb, mir, kWord, false, true);
+       next_mir = SpecialIPut(&bb, mir, kWord, false, true);
        break;
      case kIPutChar:
-       next_mir = SpecialIPut(cu, &bb, mir, kUnsignedHalf, false, false);
+       next_mir = SpecialIPut(&bb, mir, kUnsignedHalf, false, false);
        break;
      case kIPutShort:
-       next_mir = SpecialIPut(cu, &bb, mir, kSignedHalf, false, false);
+       next_mir = SpecialIPut(&bb, mir, kSignedHalf, false, false);
        break;
      case kIPutWide:
-       next_mir = SpecialIPut(cu, &bb, mir, kLong, true, false);
+       next_mir = SpecialIPut(&bb, mir, kLong, true, false);
        break;
      case kIdentity:
-       next_mir = SpecialIdentity(cu, mir);
+       next_mir = SpecialIdentity(mir);
        break;
      default:
        return;
    }
    if (next_mir != NULL) {
-    cu->current_dalvik_offset = next_mir->offset;
+    current_dalvik_offset_ = next_mir->offset;
     if (special_case != kIdentity) {
-      ArmCodegen::GenPrintLabel(cu, next_mir);
+      ArmMir2Lir::GenPrintLabel(next_mir);
     }
-    NewLIR1(cu, kThumbBx, rARM_LR);
-    cu->core_spill_mask = 0;
-    cu->num_core_spills = 0;
-    cu->fp_spill_mask = 0;
-    cu->num_fp_spills = 0;
-    cu->frame_size = 0;
-    cu->core_vmap_table.clear();
-    cu->fp_vmap_table.clear();
+    NewLIR1(kThumbBx, rARM_LR);
+    core_spill_mask_ = 0;
+    num_core_spills_ = 0;
+    fp_spill_mask_ = 0;
+    num_fp_spills_ = 0;
+    frame_size_ = 0;
+    core_vmap_table_.clear();
+    fp_vmap_table_.clear();
   }
 }
 
@@ -324,28 +317,28 @@
  *   add   rARM_PC, r_disp   ; This is the branch from which we compute displacement
  *   cbnz  r_idx, lp
  */
-void ArmCodegen::GenSparseSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
+void ArmMir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset,
                                  RegLocation rl_src)
 {
-  const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
-  if (cu->verbose) {
+  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+  if (cu_->verbose) {
     DumpSparseSwitchTable(table);
   }
   // Add the table to the list - we'll process it later
   SwitchTable *tab_rec =
-      static_cast<SwitchTable*>(NewMem(cu, sizeof(SwitchTable), true, kAllocData));
+      static_cast<SwitchTable*>(NewMem(cu_, sizeof(SwitchTable), true, kAllocData));
   tab_rec->table = table;
-  tab_rec->vaddr = cu->current_dalvik_offset;
+  tab_rec->vaddr = current_dalvik_offset_;
   int size = table[1];
-  tab_rec->targets = static_cast<LIR**>(NewMem(cu, size * sizeof(LIR*), true, kAllocLIR));
-  InsertGrowableList(cu, &cu->switch_tables, reinterpret_cast<uintptr_t>(tab_rec));
+  tab_rec->targets = static_cast<LIR**>(NewMem(cu_, size * sizeof(LIR*), true, kAllocLIR));
+  InsertGrowableList(cu_, &switch_tables_, reinterpret_cast<uintptr_t>(tab_rec));
 
   // Get the switch value
-  rl_src = LoadValue(cu, rl_src, kCoreReg);
-  int rBase = AllocTemp(cu);
+  rl_src = LoadValue(rl_src, kCoreReg);
+  int rBase = AllocTemp();
   /* Allocate key and disp temps */
-  int r_key = AllocTemp(cu);
-  int r_disp = AllocTemp(cu);
+  int r_key = AllocTemp();
+  int r_disp = AllocTemp();
   // Make sure r_key's register number is less than r_disp's number for ldmia
   if (r_key > r_disp) {
     int tmp = r_disp;
@@ -353,69 +346,69 @@
     r_key = tmp;
   }
   // Materialize a pointer to the switch table
-  NewLIR3(cu, kThumb2Adr, rBase, 0, reinterpret_cast<uintptr_t>(tab_rec));
+  NewLIR3(kThumb2Adr, rBase, 0, reinterpret_cast<uintptr_t>(tab_rec));
   // Set up r_idx
-  int r_idx = AllocTemp(cu);
-  LoadConstant(cu, r_idx, size);
+  int r_idx = AllocTemp();
+  LoadConstant(r_idx, size);
   // Establish loop branch target
-  LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+  LIR* target = NewLIR0(kPseudoTargetLabel);
   // Load next key/disp
-  NewLIR2(cu, kThumb2LdmiaWB, rBase, (1 << r_key) | (1 << r_disp));
-  OpRegReg(cu, kOpCmp, r_key, rl_src.low_reg);
+  NewLIR2(kThumb2LdmiaWB, rBase, (1 << r_key) | (1 << r_disp));
+  OpRegReg(kOpCmp, r_key, rl_src.low_reg);
   // Go if match. NOTE: No instruction set switch here - must stay Thumb2
-  OpIT(cu, kCondEq, "");
-  LIR* switch_branch = NewLIR1(cu, kThumb2AddPCR, r_disp);
+  OpIT(kCondEq, "");
+  LIR* switch_branch = NewLIR1(kThumb2AddPCR, r_disp);
   tab_rec->anchor = switch_branch;
   // Needs to use setflags encoding here
-  NewLIR3(cu, kThumb2SubsRRI12, r_idx, r_idx, 1);
-  OpCondBranch(cu, kCondNe, target);
+  NewLIR3(kThumb2SubsRRI12, r_idx, r_idx, 1);
+  OpCondBranch(kCondNe, target);
 }
 
 
-void ArmCodegen::GenPackedSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
+void ArmMir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset,
                                  RegLocation rl_src)
 {
-  const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
-  if (cu->verbose) {
+  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+  if (cu_->verbose) {
     DumpPackedSwitchTable(table);
   }
   // Add the table to the list - we'll process it later
   SwitchTable *tab_rec =
-      static_cast<SwitchTable*>(NewMem(cu, sizeof(SwitchTable), true, kAllocData));
+      static_cast<SwitchTable*>(NewMem(cu_, sizeof(SwitchTable), true, kAllocData));
   tab_rec->table = table;
-  tab_rec->vaddr = cu->current_dalvik_offset;
+  tab_rec->vaddr = current_dalvik_offset_;
   int size = table[1];
-  tab_rec->targets = static_cast<LIR**>(NewMem(cu, size * sizeof(LIR*), true, kAllocLIR));
-  InsertGrowableList(cu, &cu->switch_tables, reinterpret_cast<uintptr_t>(tab_rec));
+  tab_rec->targets = static_cast<LIR**>(NewMem(cu_, size * sizeof(LIR*), true, kAllocLIR));
+  InsertGrowableList(cu_, &switch_tables_, reinterpret_cast<uintptr_t>(tab_rec));
 
   // Get the switch value
-  rl_src = LoadValue(cu, rl_src, kCoreReg);
-  int table_base = AllocTemp(cu);
+  rl_src = LoadValue(rl_src, kCoreReg);
+  int table_base = AllocTemp();
   // Materialize a pointer to the switch table
-  NewLIR3(cu, kThumb2Adr, table_base, 0, reinterpret_cast<uintptr_t>(tab_rec));
+  NewLIR3(kThumb2Adr, table_base, 0, reinterpret_cast<uintptr_t>(tab_rec));
   int low_key = s4FromSwitchData(&table[2]);
   int keyReg;
   // Remove the bias, if necessary
   if (low_key == 0) {
     keyReg = rl_src.low_reg;
   } else {
-    keyReg = AllocTemp(cu);
-    OpRegRegImm(cu, kOpSub, keyReg, rl_src.low_reg, low_key);
+    keyReg = AllocTemp();
+    OpRegRegImm(kOpSub, keyReg, rl_src.low_reg, low_key);
   }
   // Bounds check - if < 0 or >= size, continue to the code following the switch
-  OpRegImm(cu, kOpCmp, keyReg, size-1);
-  LIR* branch_over = OpCondBranch(cu, kCondHi, NULL);
+  OpRegImm(kOpCmp, keyReg, size-1);
+  LIR* branch_over = OpCondBranch(kCondHi, NULL);
 
   // Load the displacement from the switch table
-  int disp_reg = AllocTemp(cu);
-  LoadBaseIndexed(cu, table_base, keyReg, disp_reg, 2, kWord);
+  int disp_reg = AllocTemp();
+  LoadBaseIndexed(table_base, keyReg, disp_reg, 2, kWord);
 
   // ..and go! NOTE: No instruction set switch here - must stay Thumb2
-  LIR* switch_branch = NewLIR1(cu, kThumb2AddPCR, disp_reg);
+  LIR* switch_branch = NewLIR1(kThumb2AddPCR, disp_reg);
   tab_rec->anchor = switch_branch;
 
   /* branch_over target here */
-  LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+  LIR* target = NewLIR0(kPseudoTargetLabel);
   branch_over->target = target;
 }
 
@@ -429,30 +422,30 @@
  *
  * Total size is 4+(width * size + 1)/2 16-bit code units.
  */
-void ArmCodegen::GenFillArrayData(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
+void ArmMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src)
 {
-  const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
+  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
   // Add the table to the list - we'll process it later
   FillArrayData *tab_rec =
-      static_cast<FillArrayData*>(NewMem(cu, sizeof(FillArrayData), true, kAllocData));
+      static_cast<FillArrayData*>(NewMem(cu_, sizeof(FillArrayData), true, kAllocData));
   tab_rec->table = table;
-  tab_rec->vaddr = cu->current_dalvik_offset;
+  tab_rec->vaddr = current_dalvik_offset_;
   uint16_t width = tab_rec->table[1];
   uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
   tab_rec->size = (size * width) + 8;
 
-  InsertGrowableList(cu, &cu->fill_array_data, reinterpret_cast<uintptr_t>(tab_rec));
+  InsertGrowableList(cu_, &fill_array_data_, reinterpret_cast<uintptr_t>(tab_rec));
 
   // Making a call - use explicit registers
-  FlushAllRegs(cu);   /* Everything to home location */
-  LoadValueDirectFixed(cu, rl_src, r0);
-  LoadWordDisp(cu, rARM_SELF, ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode),
+  FlushAllRegs();   /* Everything to home location */
+  LoadValueDirectFixed(rl_src, r0);
+  LoadWordDisp(rARM_SELF, ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode),
                rARM_LR);
   // Materialize a pointer to the fill data image
-  NewLIR3(cu, kThumb2Adr, r1, 0, reinterpret_cast<uintptr_t>(tab_rec));
-  ClobberCalleeSave(cu);
-  LIR* call_inst = OpReg(cu, kOpBlx, rARM_LR);
-  MarkSafepointPC(cu, call_inst);
+  NewLIR3(kThumb2Adr, r1, 0, reinterpret_cast<uintptr_t>(tab_rec));
+  ClobberCalleeSave();
+  LIR* call_inst = OpReg(kOpBlx, rARM_LR);
+  MarkSafepointPC(call_inst);
 }
 
 /*
@@ -481,33 +474,33 @@
  * preserved.
  *
  */
-void ArmCodegen::GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
+void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src)
 {
-  FlushAllRegs(cu);
+  FlushAllRegs();
   DCHECK_EQ(LW_SHAPE_THIN, 0);
-  LoadValueDirectFixed(cu, rl_src, r0);  // Get obj
-  LockCallTemps(cu);  // Prepare for explicit register usage
-  GenNullCheck(cu, rl_src.s_reg_low, r0, opt_flags);
-  LoadWordDisp(cu, rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
-  NewLIR3(cu, kThumb2Ldrex, r1, r0,
+  LoadValueDirectFixed(rl_src, r0);  // Get obj
+  LockCallTemps();  // Prepare for explicit register usage
+  GenNullCheck(rl_src.s_reg_low, r0, opt_flags);
+  LoadWordDisp(rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
+  NewLIR3(kThumb2Ldrex, r1, r0,
           mirror::Object::MonitorOffset().Int32Value() >> 2); // Get object->lock
   // Align owner
-  OpRegImm(cu, kOpLsl, r2, LW_LOCK_OWNER_SHIFT);
+  OpRegImm(kOpLsl, r2, LW_LOCK_OWNER_SHIFT);
   // Is lock unheld on lock or held by us (==thread_id) on unlock?
-  NewLIR4(cu, kThumb2Bfi, r2, r1, 0, LW_LOCK_OWNER_SHIFT - 1);
-  NewLIR3(cu, kThumb2Bfc, r1, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
-  OpRegImm(cu, kOpCmp, r1, 0);
-  OpIT(cu, kCondEq, "");
-  NewLIR4(cu, kThumb2Strex, r1, r2, r0,
+  NewLIR4(kThumb2Bfi, r2, r1, 0, LW_LOCK_OWNER_SHIFT - 1);
+  NewLIR3(kThumb2Bfc, r1, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
+  OpRegImm(kOpCmp, r1, 0);
+  OpIT(kCondEq, "");
+  NewLIR4(kThumb2Strex, r1, r2, r0,
           mirror::Object::MonitorOffset().Int32Value() >> 2);
-  OpRegImm(cu, kOpCmp, r1, 0);
-  OpIT(cu, kCondNe, "T");
+  OpRegImm(kOpCmp, r1, 0);
+  OpIT(kCondNe, "T");
   // Go expensive route - artLockObjectFromCode(self, obj);
-  LoadWordDisp(cu, rARM_SELF, ENTRYPOINT_OFFSET(pLockObjectFromCode), rARM_LR);
-  ClobberCalleeSave(cu);
-  LIR* call_inst = OpReg(cu, kOpBlx, rARM_LR);
-  MarkSafepointPC(cu, call_inst);
-  GenMemBarrier(cu, kLoadLoad);
+  LoadWordDisp(rARM_SELF, ENTRYPOINT_OFFSET(pLockObjectFromCode), rARM_LR);
+  ClobberCalleeSave();
+  LIR* call_inst = OpReg(kOpBlx, rARM_LR);
+  MarkSafepointPC(call_inst);
+  GenMemBarrier(kLoadLoad);
 }
 
 /*
@@ -516,140 +509,140 @@
  * a zero recursion count, it's safe to punch it back to the
  * initial, unlocked thin state with a store word.
  */
-void ArmCodegen::GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
+void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src)
 {
   DCHECK_EQ(LW_SHAPE_THIN, 0);
-  FlushAllRegs(cu);
-  LoadValueDirectFixed(cu, rl_src, r0);  // Get obj
-  LockCallTemps(cu);  // Prepare for explicit register usage
-  GenNullCheck(cu, rl_src.s_reg_low, r0, opt_flags);
-  LoadWordDisp(cu, r0, mirror::Object::MonitorOffset().Int32Value(), r1); // Get lock
-  LoadWordDisp(cu, rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
+  FlushAllRegs();
+  LoadValueDirectFixed(rl_src, r0);  // Get obj
+  LockCallTemps();  // Prepare for explicit register usage
+  GenNullCheck(rl_src.s_reg_low, r0, opt_flags);
+  LoadWordDisp(r0, mirror::Object::MonitorOffset().Int32Value(), r1); // Get lock
+  LoadWordDisp(rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
   // Is lock unheld on lock or held by us (==thread_id) on unlock?
-  OpRegRegImm(cu, kOpAnd, r3, r1,
+  OpRegRegImm(kOpAnd, r3, r1,
               (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT));
   // Align owner
-  OpRegImm(cu, kOpLsl, r2, LW_LOCK_OWNER_SHIFT);
-  NewLIR3(cu, kThumb2Bfc, r1, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
-  OpRegReg(cu, kOpSub, r1, r2);
-  OpIT(cu, kCondEq, "EE");
-  StoreWordDisp(cu, r0, mirror::Object::MonitorOffset().Int32Value(), r3);
+  OpRegImm(kOpLsl, r2, LW_LOCK_OWNER_SHIFT);
+  NewLIR3(kThumb2Bfc, r1, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
+  OpRegReg(kOpSub, r1, r2);
+  OpIT(kCondEq, "EE");
+  StoreWordDisp(r0, mirror::Object::MonitorOffset().Int32Value(), r3);
   // Go expensive route - UnlockObjectFromCode(obj);
-  LoadWordDisp(cu, rARM_SELF, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARM_LR);
-  ClobberCalleeSave(cu);
-  LIR* call_inst = OpReg(cu, kOpBlx, rARM_LR);
-  MarkSafepointPC(cu, call_inst);
-  GenMemBarrier(cu, kStoreLoad);
+  LoadWordDisp(rARM_SELF, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARM_LR);
+  ClobberCalleeSave();
+  LIR* call_inst = OpReg(kOpBlx, rARM_LR);
+  MarkSafepointPC(call_inst);
+  GenMemBarrier(kStoreLoad);
 }
 
-void ArmCodegen::GenMoveException(CompilationUnit* cu, RegLocation rl_dest)
+void ArmMir2Lir::GenMoveException(RegLocation rl_dest)
 {
   int ex_offset = Thread::ExceptionOffset().Int32Value();
-  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-  int reset_reg = AllocTemp(cu);
-  LoadWordDisp(cu, rARM_SELF, ex_offset, rl_result.low_reg);
-  LoadConstant(cu, reset_reg, 0);
-  StoreWordDisp(cu, rARM_SELF, ex_offset, reset_reg);
-  FreeTemp(cu, reset_reg);
-  StoreValue(cu, rl_dest, rl_result);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  int reset_reg = AllocTemp();
+  LoadWordDisp(rARM_SELF, ex_offset, rl_result.low_reg);
+  LoadConstant(reset_reg, 0);
+  StoreWordDisp(rARM_SELF, ex_offset, reset_reg);
+  FreeTemp(reset_reg);
+  StoreValue(rl_dest, rl_result);
 }
 
 /*
  * Mark garbage collection card. Skip if the value we're storing is null.
  */
-void ArmCodegen::MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg)
+void ArmMir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg)
 {
-  int reg_card_base = AllocTemp(cu);
-  int reg_card_no = AllocTemp(cu);
-  LIR* branch_over = OpCmpImmBranch(cu, kCondEq, val_reg, 0, NULL);
-  LoadWordDisp(cu, rARM_SELF, Thread::CardTableOffset().Int32Value(), reg_card_base);
-  OpRegRegImm(cu, kOpLsr, reg_card_no, tgt_addr_reg, CardTable::kCardShift);
-  StoreBaseIndexed(cu, reg_card_base, reg_card_no, reg_card_base, 0,
+  int reg_card_base = AllocTemp();
+  int reg_card_no = AllocTemp();
+  LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
+  LoadWordDisp(rARM_SELF, Thread::CardTableOffset().Int32Value(), reg_card_base);
+  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, CardTable::kCardShift);
+  StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0,
                    kUnsignedByte);
-  LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+  LIR* target = NewLIR0(kPseudoTargetLabel);
   branch_over->target = target;
-  FreeTemp(cu, reg_card_base);
-  FreeTemp(cu, reg_card_no);
+  FreeTemp(reg_card_base);
+  FreeTemp(reg_card_no);
 }
 
-void ArmCodegen::GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs, RegLocation rl_method)
+void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method)
 {
-  int spill_count = cu->num_core_spills + cu->num_fp_spills;
+  int spill_count = num_core_spills_ + num_fp_spills_;
   /*
    * On entry, r0, r1, r2 & r3 are live.  Let the register allocation
    * mechanism know so it doesn't try to use any of them when
    * expanding the frame or flushing.  This leaves the utility
    * code with a single temp: r12.  This should be enough.
    */
-  LockTemp(cu, r0);
-  LockTemp(cu, r1);
-  LockTemp(cu, r2);
-  LockTemp(cu, r3);
+  LockTemp(r0);
+  LockTemp(r1);
+  LockTemp(r2);
+  LockTemp(r3);
 
   /*
    * We can safely skip the stack overflow check if we're
    * a leaf *and* our frame size < fudge factor.
    */
-  bool skip_overflow_check = ((cu->attributes & METHOD_IS_LEAF) &&
-                            (static_cast<size_t>(cu->frame_size) <
+  bool skip_overflow_check = (mir_graph_->MethodIsLeaf() &&
+                            (static_cast<size_t>(frame_size_) <
                             Thread::kStackOverflowReservedBytes));
-  NewLIR0(cu, kPseudoMethodEntry);
+  NewLIR0(kPseudoMethodEntry);
   if (!skip_overflow_check) {
     /* Load stack limit */
-    LoadWordDisp(cu, rARM_SELF, Thread::StackEndOffset().Int32Value(), r12);
+    LoadWordDisp(rARM_SELF, Thread::StackEndOffset().Int32Value(), r12);
   }
   /* Spill core callee saves */
-  NewLIR1(cu, kThumb2Push, cu->core_spill_mask);
+  NewLIR1(kThumb2Push, core_spill_mask_);
   /* Need to spill any FP regs? */
-  if (cu->num_fp_spills) {
+  if (num_fp_spills_) {
     /*
      * NOTE: fp spills are a little different from core spills in that
      * they are pushed as a contiguous block.  When promoting from
      * the fp set, we must allocate all singles from s16..highest-promoted
      */
-    NewLIR1(cu, kThumb2VPushCS, cu->num_fp_spills);
+    NewLIR1(kThumb2VPushCS, num_fp_spills_);
   }
   if (!skip_overflow_check) {
-    OpRegRegImm(cu, kOpSub, rARM_LR, rARM_SP, cu->frame_size - (spill_count * 4));
-    GenRegRegCheck(cu, kCondCc, rARM_LR, r12, kThrowStackOverflow);
-    OpRegCopy(cu, rARM_SP, rARM_LR);     // Establish stack
+    OpRegRegImm(kOpSub, rARM_LR, rARM_SP, frame_size_ - (spill_count * 4));
+    GenRegRegCheck(kCondCc, rARM_LR, r12, kThrowStackOverflow);
+    OpRegCopy(rARM_SP, rARM_LR);     // Establish stack
   } else {
-    OpRegImm(cu, kOpSub, rARM_SP, cu->frame_size - (spill_count * 4));
+    OpRegImm(kOpSub, rARM_SP, frame_size_ - (spill_count * 4));
   }
 
-  FlushIns(cu, ArgLocs, rl_method);
+  FlushIns(ArgLocs, rl_method);
 
-  FreeTemp(cu, r0);
-  FreeTemp(cu, r1);
-  FreeTemp(cu, r2);
-  FreeTemp(cu, r3);
+  FreeTemp(r0);
+  FreeTemp(r1);
+  FreeTemp(r2);
+  FreeTemp(r3);
 }
 
-void ArmCodegen::GenExitSequence(CompilationUnit* cu)
+void ArmMir2Lir::GenExitSequence()
 {
-  int spill_count = cu->num_core_spills + cu->num_fp_spills;
+  int spill_count = num_core_spills_ + num_fp_spills_;
   /*
    * In the exit path, r0/r1 are live - make sure they aren't
    * allocated by the register utilities as temps.
    */
-  LockTemp(cu, r0);
-  LockTemp(cu, r1);
+  LockTemp(r0);
+  LockTemp(r1);
 
-  NewLIR0(cu, kPseudoMethodExit);
-  OpRegImm(cu, kOpAdd, rARM_SP, cu->frame_size - (spill_count * 4));
+  NewLIR0(kPseudoMethodExit);
+  OpRegImm(kOpAdd, rARM_SP, frame_size_ - (spill_count * 4));
   /* Need to restore any FP callee saves? */
-  if (cu->num_fp_spills) {
-    NewLIR1(cu, kThumb2VPopCS, cu->num_fp_spills);
+  if (num_fp_spills_) {
+    NewLIR1(kThumb2VPopCS, num_fp_spills_);
   }
-  if (cu->core_spill_mask & (1 << rARM_LR)) {
+  if (core_spill_mask_ & (1 << rARM_LR)) {
     /* Unspill rARM_LR to rARM_PC */
-    cu->core_spill_mask &= ~(1 << rARM_LR);
-    cu->core_spill_mask |= (1 << rARM_PC);
+    core_spill_mask_ &= ~(1 << rARM_LR);
+    core_spill_mask_ |= (1 << rARM_PC);
   }
-  NewLIR1(cu, kThumb2Pop, cu->core_spill_mask);
-  if (!(cu->core_spill_mask & (1 << rARM_PC))) {
+  NewLIR1(kThumb2Pop, core_spill_mask_);
+  if (!(core_spill_mask_ & (1 << rARM_PC))) {
     /* We didn't pop to rARM_PC, so must do a bx rARM_LR */
-    NewLIR1(cu, kThumbBx, rARM_LR);
+    NewLIR1(kThumbBx, rARM_LR);
   }
 }
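
For reference, the lookup that the GenSparseSwitch() loop above performs can
be restated in plain C++.  This is a semantic sketch of the emitted Thumb2,
not code that exists in the tree; the interleaved (key, displacement) layout
is inferred from the ldmia that loads r_key and r_disp together.

    // Semantic sketch of the emitted loop: `table` holds `size` interleaved
    // (key, displacement) pairs; a hit adds the displacement to the pc.
    static int32_t SparseSwitchDisplacement(const int32_t* table, int size,
                                            int32_t switch_value) {
      for (int i = 0; i < size; ++i) {
        int32_t key  = table[2 * i];      // ldmia: key first...
        int32_t disp = table[2 * i + 1];  // ...then its displacement
        if (key == switch_value) {
          return disp;                    // kThumb2AddPCR: pc += disp
        }
      }
      return 0;  // no match: execution falls through past the loop
    }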
 
diff --git a/src/compiler/dex/quick/arm/codegen_arm.h b/src/compiler/dex/quick/arm/codegen_arm.h
index 29aef0e..df9451a 100644
--- a/src/compiler/dex/quick/arm/codegen_arm.h
+++ b/src/compiler/dex/quick/arm/codegen_arm.h
@@ -21,64 +21,60 @@
 
 namespace art {
 
-class ArmCodegen : public Codegen {
+class ArmMir2Lir : public Mir2Lir {
   public:
+
+    ArmMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph);
+
     // Required for target - codegen helpers.
-    virtual bool SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
-                                    RegLocation rl_src, RegLocation rl_dest, int lit);
-    virtual int LoadHelper(CompilationUnit* cu, int offset);
-    virtual LIR* LoadBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_dest,
-                              OpSize size, int s_reg);
-    virtual LIR* LoadBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_dest_lo,
-                                  int r_dest_hi, int s_reg);
-    virtual LIR* LoadBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_dest, int scale,
-                                 OpSize size);
-    virtual LIR* LoadBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
-                                     int displacement, int r_dest, int r_dest_hi, OpSize size,
-                                     int s_reg);
-    virtual LIR* LoadConstantNoClobber(CompilationUnit* cu, int r_dest, int value);
-    virtual LIR* LoadConstantWide(CompilationUnit* cu, int r_dest_lo, int r_dest_hi, int64_t value);
-    virtual LIR* StoreBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_src,
-                               OpSize size);
-    virtual LIR* StoreBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_src_lo,
-                                   int r_src_hi);
-    virtual LIR* StoreBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_src, int scale,
-                                 OpSize size);
-    virtual LIR* StoreBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
-                                      int displacement, int r_src, int r_src_hi, OpSize size,
-                                      int s_reg);
-    virtual void MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg);
+    virtual bool SmallLiteralDivide(Instruction::Code dalvik_opcode, RegLocation rl_src,
+                                    RegLocation rl_dest, int lit);
+    virtual int LoadHelper(int offset);
+    virtual LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg);
+    virtual LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
+                                  int s_reg);
+    virtual LIR* LoadBaseIndexed(int rBase, int r_index, int r_dest, int scale, OpSize size);
+    virtual LIR* LoadBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
+                                     int r_dest, int r_dest_hi, OpSize size, int s_reg);
+    virtual LIR* LoadConstantNoClobber(int r_dest, int value);
+    virtual LIR* LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value);
+    virtual LIR* StoreBaseDisp(int rBase, int displacement, int r_src, OpSize size);
+    virtual LIR* StoreBaseDispWide(int rBase, int displacement, int r_src_lo, int r_src_hi);
+    virtual LIR* StoreBaseIndexed(int rBase, int r_index, int r_src, int scale, OpSize size);
+    virtual LIR* StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
+                                      int r_src, int r_src_hi, OpSize size, int s_reg);
+    virtual void MarkGCCard(int val_reg, int tgt_addr_reg);
 
     // Required for target - register utilities.
     virtual bool IsFpReg(int reg);
     virtual bool SameRegType(int reg1, int reg2);
-    virtual int AllocTypedTemp(CompilationUnit* cu, bool fp_hint, int reg_class);
-    virtual int AllocTypedTempPair(CompilationUnit* cu, bool fp_hint, int reg_class);
+    virtual int AllocTypedTemp(bool fp_hint, int reg_class);
+    virtual int AllocTypedTempPair(bool fp_hint, int reg_class);
     virtual int S2d(int low_reg, int high_reg);
     virtual int TargetReg(SpecialTargetRegister reg);
-    virtual RegisterInfo* GetRegInfo(CompilationUnit* cu, int reg);
-    virtual RegLocation GetReturnAlt(CompilationUnit* cu);
-    virtual RegLocation GetReturnWideAlt(CompilationUnit* cu);
+    virtual RegisterInfo* GetRegInfo(int reg);
+    virtual RegLocation GetReturnAlt();
+    virtual RegLocation GetReturnWideAlt();
     virtual RegLocation LocCReturn();
     virtual RegLocation LocCReturnDouble();
     virtual RegLocation LocCReturnFloat();
     virtual RegLocation LocCReturnWide();
     virtual uint32_t FpRegMask();
-    virtual uint64_t GetRegMaskCommon(CompilationUnit* cu, int reg);
-    virtual void AdjustSpillMask(CompilationUnit* cu);
-    virtual void ClobberCalleeSave(CompilationUnit *cu);
-    virtual void FlushReg(CompilationUnit* cu, int reg);
-    virtual void FlushRegWide(CompilationUnit* cu, int reg1, int reg2);
-    virtual void FreeCallTemps(CompilationUnit* cu);
-    virtual void FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep, RegLocation rl_free);
-    virtual void LockCallTemps(CompilationUnit* cu);
-    virtual void MarkPreservedSingle(CompilationUnit* cu, int v_reg, int reg);
-    virtual void CompilerInitializeRegAlloc(CompilationUnit* cu);
+    virtual uint64_t GetRegMaskCommon(int reg);
+    virtual void AdjustSpillMask();
+    virtual void ClobberCalleeSave();
+    virtual void FlushReg(int reg);
+    virtual void FlushRegWide(int reg1, int reg2);
+    virtual void FreeCallTemps();
+    virtual void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free);
+    virtual void LockCallTemps();
+    virtual void MarkPreservedSingle(int v_reg, int reg);
+    virtual void CompilerInitializeRegAlloc();
 
     // Required for target - miscellaneous.
-    virtual AssemblerStatus AssembleInstructions(CompilationUnit* cu, uintptr_t start_addr);
+    virtual AssemblerStatus AssembleInstructions(uintptr_t start_addr);
     virtual void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
-    virtual void SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir);
+    virtual void SetupTargetResourceMasks(LIR* lir);
     virtual const char* GetTargetInstFmt(int opcode);
     virtual const char* GetTargetInstName(int opcode);
     virtual std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
@@ -88,129 +84,111 @@
     virtual bool IsUnconditionalBranch(LIR* lir);
 
     // Required for target - Dalvik-level generators.
-    virtual void GenArithImmOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+    virtual void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                    RegLocation rl_src1, RegLocation rl_src2);
-    virtual void GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array,
-                                RegLocation rl_index, RegLocation rl_src, int scale);
-    virtual void GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+    virtual void GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
+                                RegLocation rl_src, int scale);
+    virtual void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                              RegLocation rl_index, RegLocation rl_dest, int scale);
-    virtual void GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+    virtual void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                              RegLocation rl_index, RegLocation rl_src, int scale);
-    virtual void GenShiftImmOpLong(CompilationUnit* cu, Instruction::Code opcode,
-                                   RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_shift);
-    virtual void GenMulLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2);
-    virtual void GenAddLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2);
-    virtual void GenAndLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2);
-    virtual void GenArithOpDouble(CompilationUnit* cu, Instruction::Code opcode,
-                                  RegLocation rl_dest, RegLocation rl_src1,
-                                  RegLocation rl_src2);
-    virtual void GenArithOpFloat(CompilationUnit *cu, Instruction::Code opcode, RegLocation rl_dest,
+    virtual void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                                   RegLocation rl_src1, RegLocation rl_shift);
+    virtual void GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    virtual void GenAddLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    virtual void GenAndLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    virtual void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest,
+                                  RegLocation rl_src1, RegLocation rl_src2);
+    virtual void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
                                  RegLocation rl_src1, RegLocation rl_src2);
-    virtual void GenCmpFP(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
-                          RegLocation rl_src1, RegLocation rl_src2);
-    virtual void GenConversion(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
-                               RegLocation rl_src);
-    virtual bool GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier);
-    virtual bool GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min);
-    virtual bool GenInlinedSqrt(CompilationUnit* cu, CallInfo* info);
-    virtual void GenNegLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
-    virtual void GenOrLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
-                           RegLocation rl_src2);
-    virtual void GenSubLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2);
-    virtual void GenXorLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2);
-    virtual LIR* GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code, int reg1, int base,
-                                int offset, ThrowKind kind);
-    virtual RegLocation GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int reg_hi,
-                                  bool is_div);
-    virtual RegLocation GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int lit,
-                                     bool is_div);
-    virtual void GenCmpLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2);
-    virtual void GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi);
-    virtual void GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs,
-                                  RegLocation rl_method);
-    virtual void GenExitSequence(CompilationUnit* cu);
-    virtual void GenFillArrayData(CompilationUnit* cu, uint32_t table_offset,
-                                  RegLocation rl_src);
-    virtual void GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir, bool gt_bias,
-                                     bool is_double);
-    virtual void GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir);
-    virtual void GenSelect(CompilationUnit* cu, BasicBlock* bb, MIR* mir);
-    virtual void GenMemBarrier(CompilationUnit* cu, MemBarrierKind barrier_kind);
-    virtual void GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src);
-    virtual void GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src);
-    virtual void GenMoveException(CompilationUnit* cu, RegLocation rl_dest);
-    virtual void GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
-                                               RegLocation rl_result, int lit, int first_bit,
-                                               int second_bit);
-    virtual void GenNegDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
-    virtual void GenNegFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
-    virtual void GenPackedSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
-                                 RegLocation rl_src);
-    virtual void GenSparseSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
-                                 RegLocation rl_src);
-    virtual void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
-                                SpecialCaseHandler special_case);
+    virtual void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                          RegLocation rl_src2);
+    virtual void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
+    virtual bool GenInlinedCas32(CallInfo* info, bool need_write_barrier);
+    virtual bool GenInlinedMinMaxInt(CallInfo* info, bool is_min);
+    virtual bool GenInlinedSqrt(CallInfo* info);
+    virtual void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
+    virtual void GenOrLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    virtual void GenSubLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    virtual void GenXorLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    virtual LIR* GenRegMemCheck(ConditionCode c_code, int reg1, int base, int offset,
+                                ThrowKind kind);
+    virtual RegLocation GenDivRem(RegLocation rl_dest, int reg_lo, int reg_hi, bool is_div);
+    virtual RegLocation GenDivRemLit(RegLocation rl_dest, int reg_lo, int lit, bool is_div);
+    virtual void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    virtual void GenDivZeroCheck(int reg_lo, int reg_hi);
+    virtual void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
+    virtual void GenExitSequence();
+    virtual void GenFillArrayData(uint32_t table_offset, RegLocation rl_src);
+    virtual void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
+    virtual void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
+    virtual void GenSelect(BasicBlock* bb, MIR* mir);
+    virtual void GenMemBarrier(MemBarrierKind barrier_kind);
+    virtual void GenMonitorEnter(int opt_flags, RegLocation rl_src);
+    virtual void GenMonitorExit(int opt_flags, RegLocation rl_src);
+    virtual void GenMoveException(RegLocation rl_dest);
+    virtual void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
+                                               int first_bit, int second_bit);
+    virtual void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
+    virtual void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
+    virtual void GenPackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+    virtual void GenSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+    virtual void GenSpecialCase(BasicBlock* bb, MIR* mir, SpecialCaseHandler special_case);
 
     // Required for target - single operation generators.
-    virtual LIR* OpUnconditionalBranch(CompilationUnit* cu, LIR* target);
-    virtual LIR* OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1, int src2,
-                             LIR* target);
-    virtual LIR* OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg, int check_value,
-                                LIR* target);
-    virtual LIR* OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target);
-    virtual LIR* OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg,
-                                LIR* target);
-    virtual LIR* OpFpRegCopy(CompilationUnit* cu, int r_dest, int r_src);
-    virtual LIR* OpIT(CompilationUnit* cu, ConditionCode cond, const char* guide);
-    virtual LIR* OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp);
-    virtual LIR* OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target);
-    virtual LIR* OpReg(CompilationUnit* cu, OpKind op, int r_dest_src);
-    virtual LIR* OpRegCopy(CompilationUnit* cu, int r_dest, int r_src);
-    virtual LIR* OpRegCopyNoInsert(CompilationUnit* cu, int r_dest, int r_src);
-    virtual LIR* OpRegImm(CompilationUnit* cu, OpKind op, int r_dest_src1, int value);
-    virtual LIR* OpRegMem(CompilationUnit* cu, OpKind op, int r_dest, int rBase, int offset);
-    virtual LIR* OpRegReg(CompilationUnit* cu, OpKind op, int r_dest_src1, int r_src2);
-    virtual LIR* OpRegRegImm(CompilationUnit* cu, OpKind op, int r_dest, int r_src1, int value);
-    virtual LIR* OpRegRegReg(CompilationUnit* cu, OpKind op, int r_dest, int r_src1,
-                             int r_src2);
-    virtual LIR* OpTestSuspend(CompilationUnit* cu, LIR* target);
-    virtual LIR* OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset);
-    virtual LIR* OpVldm(CompilationUnit* cu, int rBase, int count);
-    virtual LIR* OpVstm(CompilationUnit* cu, int rBase, int count);
-    virtual void OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale,
-                       int offset);
-    virtual void OpRegCopyWide(CompilationUnit* cu, int dest_lo, int dest_hi, int src_lo,
-                               int src_hi);
-    virtual void OpTlsCmp(CompilationUnit* cu, int offset, int val);
+    virtual LIR* OpUnconditionalBranch(LIR* target);
+    virtual LIR* OpCmpBranch(ConditionCode cond, int src1, int src2, LIR* target);
+    virtual LIR* OpCmpImmBranch(ConditionCode cond, int reg, int check_value, LIR* target);
+    virtual LIR* OpCondBranch(ConditionCode cc, LIR* target);
+    virtual LIR* OpDecAndBranch(ConditionCode c_code, int reg, LIR* target);
+    virtual LIR* OpFpRegCopy(int r_dest, int r_src);
+    virtual LIR* OpIT(ConditionCode cond, const char* guide);
+    virtual LIR* OpMem(OpKind op, int rBase, int disp);
+    virtual LIR* OpPcRelLoad(int reg, LIR* target);
+    virtual LIR* OpReg(OpKind op, int r_dest_src);
+    virtual LIR* OpRegCopy(int r_dest, int r_src);
+    virtual LIR* OpRegCopyNoInsert(int r_dest, int r_src);
+    virtual LIR* OpRegImm(OpKind op, int r_dest_src1, int value);
+    virtual LIR* OpRegMem(OpKind op, int r_dest, int rBase, int offset);
+    virtual LIR* OpRegReg(OpKind op, int r_dest_src1, int r_src2);
+    virtual LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value);
+    virtual LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2);
+    virtual LIR* OpTestSuspend(LIR* target);
+    virtual LIR* OpThreadMem(OpKind op, int thread_offset);
+    virtual LIR* OpVldm(int rBase, int count);
+    virtual LIR* OpVstm(int rBase, int count);
+    virtual void OpLea(int rBase, int reg1, int reg2, int scale, int offset);
+    virtual void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi);
+    virtual void OpTlsCmp(int offset, int val);
 
-    static RegLocation ArgLoc(CompilationUnit* cu, RegLocation loc);
-    LIR* LoadBaseDispBody(CompilationUnit* cu, int rBase, int displacement, int r_dest,
-                          int r_dest_hi, OpSize size, int s_reg);
-    LIR* StoreBaseDispBody(CompilationUnit* cu, int rBase, int displacement, int r_src,
-                           int r_src_hi, OpSize size);
-    static void GenPrintLabel(CompilationUnit *cu, MIR* mir);
-    static LIR* OpRegRegRegShift(CompilationUnit* cu, OpKind op, int r_dest, int r_src1,
-                                 int r_src2, int shift);
-    static LIR* OpRegRegShift(CompilationUnit* cu, OpKind op, int r_dest_src1, int r_src2,
-                              int shift);
+    RegLocation ArgLoc(RegLocation loc);
+    LIR* LoadBaseDispBody(int rBase, int displacement, int r_dest, int r_dest_hi, OpSize size,
+                          int s_reg);
+    LIR* StoreBaseDispBody(int rBase, int displacement, int r_src, int r_src_hi, OpSize size);
+    void GenPrintLabel(MIR* mir);
+    LIR* OpRegRegRegShift(OpKind op, int r_dest, int r_src1, int r_src2, int shift);
+    LIR* OpRegRegShift(OpKind op, int r_dest_src1, int r_src2, int shift);
     static const ArmEncodingMap EncodingMap[kArmLast];
-    static int EncodeShift(int code, int amount);
-    static int ModifiedImmediate(uint32_t value);
-    static ArmConditionCode ArmConditionEncoding(ConditionCode code);
+    int EncodeShift(int code, int amount);
+    int ModifiedImmediate(uint32_t value);
+    ArmConditionCode ArmConditionEncoding(ConditionCode code);
     bool InexpensiveConstantInt(int32_t value);
     bool InexpensiveConstantFloat(int32_t value);
     bool InexpensiveConstantLong(int64_t value);
     bool InexpensiveConstantDouble(int64_t value);
 
   private:
-    void GenFusedLongCmpImmBranch(CompilationUnit* cu, BasicBlock* bb, RegLocation rl_src1,
-                                  int64_t val, ConditionCode ccode);
+    void GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1, int64_t val,
+                                  ConditionCode ccode);
+    int InPosition(int s_reg);
+    RegLocation LoadArg(RegLocation loc);
+    void LockLiveArgs(MIR* mir);
+    MIR* GetNextMir(BasicBlock** p_bb, MIR* mir);
+    MIR* SpecialIGet(BasicBlock** bb, MIR* mir, OpSize size, bool long_or_double, bool is_object);
+    MIR* SpecialIPut(BasicBlock** bb, MIR* mir, OpSize size, bool long_or_double, bool is_object);
+    MIR* SpecialIdentity(MIR* mir);
+    LIR* LoadFPConstantValue(int r_dest, int value);
+    bool BadOverlap(RegLocation rl_src, RegLocation rl_dest);
 };
 
 }  // namespace art
diff --git a/src/compiler/dex/quick/arm/fp_arm.cc b/src/compiler/dex/quick/arm/fp_arm.cc
index cc65217..4bf8738 100644
--- a/src/compiler/dex/quick/arm/fp_arm.cc
+++ b/src/compiler/dex/quick/arm/fp_arm.cc
@@ -16,12 +16,10 @@
 
 #include "arm_lir.h"
 #include "codegen_arm.h"
-#include "compiler/dex/quick/codegen_util.h"
-#include "compiler/dex/quick/ralloc_util.h"
 
 namespace art {
 
-void ArmCodegen::GenArithOpFloat(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+void ArmMir2Lir::GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
                                  RegLocation rl_src1, RegLocation rl_src2)
 {
   int op = kThumbBkpt;
@@ -50,25 +48,25 @@
       break;
     case Instruction::REM_FLOAT_2ADDR:
     case Instruction::REM_FLOAT:
-      FlushAllRegs(cu);   // Send everything to home location
-      CallRuntimeHelperRegLocationRegLocation(cu, ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, false);
-      rl_result = GetReturn(cu, true);
-      StoreValue(cu, rl_dest, rl_result);
+      FlushAllRegs();   // Send everything to home location
+      CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, false);
+      rl_result = GetReturn(true);
+      StoreValue(rl_dest, rl_result);
       return;
     case Instruction::NEG_FLOAT:
-      GenNegFloat(cu, rl_dest, rl_src1);
+      GenNegFloat(rl_dest, rl_src1);
       return;
     default:
       LOG(FATAL) << "Unexpected opcode: " << opcode;
   }
-  rl_src1 = LoadValue(cu, rl_src1, kFPReg);
-  rl_src2 = LoadValue(cu, rl_src2, kFPReg);
-  rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
-  NewLIR3(cu, op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
-  StoreValue(cu, rl_dest, rl_result);
+  rl_src1 = LoadValue(rl_src1, kFPReg);
+  rl_src2 = LoadValue(rl_src2, kFPReg);
+  rl_result = EvalLoc(rl_dest, kFPReg, true);
+  NewLIR3(op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
+  StoreValue(rl_dest, rl_result);
 }
 
-void ArmCodegen::GenArithOpDouble(CompilationUnit* cu, Instruction::Code opcode,
+void ArmMir2Lir::GenArithOpDouble(Instruction::Code opcode,
                                   RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
 {
   int op = kThumbBkpt;
@@ -93,31 +91,31 @@
       break;
     case Instruction::REM_DOUBLE_2ADDR:
     case Instruction::REM_DOUBLE:
-      FlushAllRegs(cu);   // Send everything to home location
-      CallRuntimeHelperRegLocationRegLocation(cu, ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, false);
-      rl_result = GetReturnWide(cu, true);
-      StoreValueWide(cu, rl_dest, rl_result);
+      FlushAllRegs();   // Send everything to home location
+      CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, false);
+      rl_result = GetReturnWide(true);
+      StoreValueWide(rl_dest, rl_result);
       return;
     case Instruction::NEG_DOUBLE:
-      GenNegDouble(cu, rl_dest, rl_src1);
+      GenNegDouble(rl_dest, rl_src1);
       return;
     default:
       LOG(FATAL) << "Unexpected opcode: " << opcode;
   }
 
-  rl_src1 = LoadValueWide(cu, rl_src1, kFPReg);
+  rl_src1 = LoadValueWide(rl_src1, kFPReg);
   DCHECK(rl_src1.wide);
-  rl_src2 = LoadValueWide(cu, rl_src2, kFPReg);
+  rl_src2 = LoadValueWide(rl_src2, kFPReg);
   DCHECK(rl_src2.wide);
-  rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+  rl_result = EvalLoc(rl_dest, kFPReg, true);
   DCHECK(rl_dest.wide);
   DCHECK(rl_result.wide);
-  NewLIR3(cu, op, S2d(rl_result.low_reg, rl_result.high_reg), S2d(rl_src1.low_reg, rl_src1.high_reg),
+  NewLIR3(op, S2d(rl_result.low_reg, rl_result.high_reg), S2d(rl_src1.low_reg, rl_src1.high_reg),
           S2d(rl_src2.low_reg, rl_src2.high_reg));
-  StoreValueWide(cu, rl_dest, rl_result);
+  StoreValueWide(rl_dest, rl_result);
 }
 
-void ArmCodegen::GenConversion(CompilationUnit* cu, Instruction::Code opcode,
+void ArmMir2Lir::GenConversion(Instruction::Code opcode,
                                RegLocation rl_dest, RegLocation rl_src)
 {
   int op = kThumbBkpt;
@@ -144,60 +142,59 @@
       op = kThumb2VcvtDI;
       break;
     case Instruction::LONG_TO_DOUBLE:
-      GenConversionCall(cu, ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src);
+      GenConversionCall(ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src);
       return;
     case Instruction::FLOAT_TO_LONG:
-      GenConversionCall(cu, ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
+      GenConversionCall(ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
       return;
     case Instruction::LONG_TO_FLOAT:
-      GenConversionCall(cu, ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src);
+      GenConversionCall(ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src);
       return;
     case Instruction::DOUBLE_TO_LONG:
-      GenConversionCall(cu, ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
+      GenConversionCall(ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
       return;
     default:
       LOG(FATAL) << "Unexpected opcode: " << opcode;
   }
   if (rl_src.wide) {
-    rl_src = LoadValueWide(cu, rl_src, kFPReg);
+    rl_src = LoadValueWide(rl_src, kFPReg);
     src_reg = S2d(rl_src.low_reg, rl_src.high_reg);
   } else {
-    rl_src = LoadValue(cu, rl_src, kFPReg);
+    rl_src = LoadValue(rl_src, kFPReg);
     src_reg = rl_src.low_reg;
   }
   if (rl_dest.wide) {
-    rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
-    NewLIR2(cu, op, S2d(rl_result.low_reg, rl_result.high_reg), src_reg);
-    StoreValueWide(cu, rl_dest, rl_result);
+    rl_result = EvalLoc(rl_dest, kFPReg, true);
+    NewLIR2(op, S2d(rl_result.low_reg, rl_result.high_reg), src_reg);
+    StoreValueWide(rl_dest, rl_result);
   } else {
-    rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
-    NewLIR2(cu, op, rl_result.low_reg, src_reg);
-    StoreValue(cu, rl_dest, rl_result);
+    rl_result = EvalLoc(rl_dest, kFPReg, true);
+    NewLIR2(op, rl_result.low_reg, src_reg);
+    StoreValue(rl_dest, rl_result);
   }
 }
 
-void ArmCodegen::GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir, bool gt_bias,
+void ArmMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
                                      bool is_double)
 {
-  LIR* label_list = cu->block_label_list;
-  LIR* target = &label_list[bb->taken->id];
+  LIR* target = &block_label_list_[bb->taken->id];
   RegLocation rl_src1;
   RegLocation rl_src2;
   if (is_double) {
-    rl_src1 = GetSrcWide(cu, mir, 0);
-    rl_src2 = GetSrcWide(cu, mir, 2);
-    rl_src1 = LoadValueWide(cu, rl_src1, kFPReg);
-    rl_src2 = LoadValueWide(cu, rl_src2, kFPReg);
-    NewLIR2(cu, kThumb2Vcmpd, S2d(rl_src1.low_reg, rl_src2.high_reg),
+    rl_src1 = mir_graph_->GetSrcWide(mir, 0);
+    rl_src2 = mir_graph_->GetSrcWide(mir, 2);
+    rl_src1 = LoadValueWide(rl_src1, kFPReg);
+    rl_src2 = LoadValueWide(rl_src2, kFPReg);
+    NewLIR2(kThumb2Vcmpd, S2d(rl_src1.low_reg, rl_src1.high_reg),
             S2d(rl_src2.low_reg, rl_src2.high_reg));
   } else {
-    rl_src1 = GetSrc(cu, mir, 0);
-    rl_src2 = GetSrc(cu, mir, 1);
-    rl_src1 = LoadValue(cu, rl_src1, kFPReg);
-    rl_src2 = LoadValue(cu, rl_src2, kFPReg);
-    NewLIR2(cu, kThumb2Vcmps, rl_src1.low_reg, rl_src2.low_reg);
+    rl_src1 = mir_graph_->GetSrc(mir, 0);
+    rl_src2 = mir_graph_->GetSrc(mir, 1);
+    rl_src1 = LoadValue(rl_src1, kFPReg);
+    rl_src2 = LoadValue(rl_src2, kFPReg);
+    NewLIR2(kThumb2Vcmps, rl_src1.low_reg, rl_src2.low_reg);
   }
-  NewLIR0(cu, kThumb2Fmstat);
+  NewLIR0(kThumb2Fmstat);
   ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
   switch (ccode) {
     case kCondEq:
@@ -226,11 +223,11 @@
     default:
       LOG(FATAL) << "Unexpected ccode: " << ccode;
   }
-  OpCondBranch(cu, ccode, target);
+  OpCondBranch(ccode, target);
 }
 
 
-void ArmCodegen::GenCmpFP(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+void ArmMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
                           RegLocation rl_src1, RegLocation rl_src2)
 {
   bool is_double = false;
@@ -258,78 +255,78 @@
       LOG(FATAL) << "Unexpected opcode: " << opcode;
   }
   if (is_double) {
-    rl_src1 = LoadValueWide(cu, rl_src1, kFPReg);
-    rl_src2 = LoadValueWide(cu, rl_src2, kFPReg);
+    rl_src1 = LoadValueWide(rl_src1, kFPReg);
+    rl_src2 = LoadValueWide(rl_src2, kFPReg);
     // In case result vreg is also a src vreg, break association to avoid useless copy by EvalLoc()
-    ClobberSReg(cu, rl_dest.s_reg_low);
-    rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-    LoadConstant(cu, rl_result.low_reg, default_result);
-    NewLIR2(cu, kThumb2Vcmpd, S2d(rl_src1.low_reg, rl_src2.high_reg),
+    ClobberSReg(rl_dest.s_reg_low);
+    rl_result = EvalLoc(rl_dest, kCoreReg, true);
+    LoadConstant(rl_result.low_reg, default_result);
+    NewLIR2(kThumb2Vcmpd, S2d(rl_src1.low_reg, rl_src1.high_reg),
             S2d(rl_src2.low_reg, rl_src2.high_reg));
   } else {
-    rl_src1 = LoadValue(cu, rl_src1, kFPReg);
-    rl_src2 = LoadValue(cu, rl_src2, kFPReg);
+    rl_src1 = LoadValue(rl_src1, kFPReg);
+    rl_src2 = LoadValue(rl_src2, kFPReg);
     // In case result vreg is also a srcvreg, break association to avoid useless copy by EvalLoc()
-    ClobberSReg(cu, rl_dest.s_reg_low);
-    rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-    LoadConstant(cu, rl_result.low_reg, default_result);
-    NewLIR2(cu, kThumb2Vcmps, rl_src1.low_reg, rl_src2.low_reg);
+    ClobberSReg(rl_dest.s_reg_low);
+    rl_result = EvalLoc(rl_dest, kCoreReg, true);
+    LoadConstant(rl_result.low_reg, default_result);
+    NewLIR2(kThumb2Vcmps, rl_src1.low_reg, rl_src2.low_reg);
   }
   DCHECK(!ARM_FPREG(rl_result.low_reg));
-  NewLIR0(cu, kThumb2Fmstat);
+  NewLIR0(kThumb2Fmstat);
 
-  OpIT(cu, (default_result == -1) ? kCondGt : kCondMi, "");
-  NewLIR2(cu, kThumb2MovImmShift, rl_result.low_reg,
+  OpIT((default_result == -1) ? kCondGt : kCondMi, "");
+  NewLIR2(kThumb2MovImmShift, rl_result.low_reg,
           ModifiedImmediate(-default_result)); // Must not alter ccodes
-  GenBarrier(cu);
+  GenBarrier();
 
-  OpIT(cu, kCondEq, "");
-  LoadConstant(cu, rl_result.low_reg, 0);
-  GenBarrier(cu);
+  OpIT(kCondEq, "");
+  LoadConstant(rl_result.low_reg, 0);
+  GenBarrier();
 
-  StoreValue(cu, rl_dest, rl_result);
+  StoreValue(rl_dest, rl_result);
 }
 
-void ArmCodegen::GenNegFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+void ArmMir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src)
 {
   RegLocation rl_result;
-  rl_src = LoadValue(cu, rl_src, kFPReg);
-  rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
-  NewLIR2(cu, kThumb2Vnegs, rl_result.low_reg, rl_src.low_reg);
-  StoreValue(cu, rl_dest, rl_result);
+  rl_src = LoadValue(rl_src, kFPReg);
+  rl_result = EvalLoc(rl_dest, kFPReg, true);
+  NewLIR2(kThumb2Vnegs, rl_result.low_reg, rl_src.low_reg);
+  StoreValue(rl_dest, rl_result);
 }
 
-void ArmCodegen::GenNegDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+void ArmMir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src)
 {
   RegLocation rl_result;
-  rl_src = LoadValueWide(cu, rl_src, kFPReg);
-  rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
-  NewLIR2(cu, kThumb2Vnegd, S2d(rl_result.low_reg, rl_result.high_reg),
+  rl_src = LoadValueWide(rl_src, kFPReg);
+  rl_result = EvalLoc(rl_dest, kFPReg, true);
+  NewLIR2(kThumb2Vnegd, S2d(rl_result.low_reg, rl_result.high_reg),
           S2d(rl_src.low_reg, rl_src.high_reg));
-  StoreValueWide(cu, rl_dest, rl_result);
+  StoreValueWide(rl_dest, rl_result);
 }
 
-bool ArmCodegen::GenInlinedSqrt(CompilationUnit* cu, CallInfo* info) {
-  DCHECK_EQ(cu->instruction_set, kThumb2);
+bool ArmMir2Lir::GenInlinedSqrt(CallInfo* info) {
+  DCHECK_EQ(cu_->instruction_set, kThumb2);
   LIR *branch;
   RegLocation rl_src = info->args[0];
-  RegLocation rl_dest = InlineTargetWide(cu, info);  // double place for result
-  rl_src = LoadValueWide(cu, rl_src, kFPReg);
-  RegLocation rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
-  NewLIR2(cu, kThumb2Vsqrtd, S2d(rl_result.low_reg, rl_result.high_reg),
+  RegLocation rl_dest = InlineTargetWide(info);  // double place for result
+  rl_src = LoadValueWide(rl_src, kFPReg);
+  RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
+  NewLIR2(kThumb2Vsqrtd, S2d(rl_result.low_reg, rl_result.high_reg),
           S2d(rl_src.low_reg, rl_src.high_reg));
-  NewLIR2(cu, kThumb2Vcmpd, S2d(rl_result.low_reg, rl_result.high_reg),
+  NewLIR2(kThumb2Vcmpd, S2d(rl_result.low_reg, rl_result.high_reg),
           S2d(rl_result.low_reg, rl_result.high_reg));
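+  // vsqrtd produces NaN for out-of-range (negative) inputs; NaN compares
+  // unordered with itself, so the kArmCondEq branch below skips the pSqrt
+  // runtime fallback only when the hardware result is valid.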
-  NewLIR0(cu, kThumb2Fmstat);
-  branch = NewLIR2(cu, kThumbBCond, 0, kArmCondEq);
-  ClobberCalleeSave(cu);
-  LockCallTemps(cu);  // Using fixed registers
-  int r_tgt = LoadHelper(cu, ENTRYPOINT_OFFSET(pSqrt));
-  NewLIR3(cu, kThumb2Fmrrd, r0, r1, S2d(rl_src.low_reg, rl_src.high_reg));
-  NewLIR1(cu, kThumbBlxR, r_tgt);
-  NewLIR3(cu, kThumb2Fmdrr, S2d(rl_result.low_reg, rl_result.high_reg), r0, r1);
-  branch->target = NewLIR0(cu, kPseudoTargetLabel);
-  StoreValueWide(cu, rl_dest, rl_result);
+  NewLIR0(kThumb2Fmstat);
+  branch = NewLIR2(kThumbBCond, 0, kArmCondEq);
+  ClobberCalleeSave();
+  LockCallTemps();  // Using fixed registers
+  int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pSqrt));
+  NewLIR3(kThumb2Fmrrd, r0, r1, S2d(rl_src.low_reg, rl_src.high_reg));
+  NewLIR1(kThumbBlxR, r_tgt);
+  NewLIR3(kThumb2Fmdrr, S2d(rl_result.low_reg, rl_result.high_reg), r0, r1);
+  branch->target = NewLIR0(kPseudoTargetLabel);
+  StoreValueWide(rl_dest, rl_result);
   return true;
 }
 
diff --git a/src/compiler/dex/quick/arm/int_arm.cc b/src/compiler/dex/quick/arm/int_arm.cc
index 0ebc943..4133aea 100644
--- a/src/compiler/dex/quick/arm/int_arm.cc
+++ b/src/compiler/dex/quick/arm/int_arm.cc
@@ -18,17 +18,15 @@
 
 #include "arm_lir.h"
 #include "codegen_arm.h"
-#include "compiler/dex/quick/codegen_util.h"
-#include "compiler/dex/quick/ralloc_util.h"
 #include "oat/runtime/oat_support_entrypoints.h"
 
 namespace art {
 
-LIR* ArmCodegen::OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1,
+LIR* ArmMir2Lir::OpCmpBranch(ConditionCode cond, int src1,
          int src2, LIR* target)
 {
-  OpRegReg(cu, kOpCmp, src1, src2);
-  return OpCondBranch(cu, cond, target);
+  OpRegReg(kOpCmp, src1, src2);
+  return OpCondBranch(cond, target);
 }
 
 /*
@@ -41,7 +39,7 @@
  * met, and an "E" means the instruction is executed if the condition
  * is not met.
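+ * For example, OpIT(kCondEq, "E") covers two instructions: the first
+ * executes if the condition holds, the second ("E") if it does not.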
  */
-LIR* ArmCodegen::OpIT(CompilationUnit* cu, ConditionCode ccode, const char* guide)
+LIR* ArmMir2Lir::OpIT(ConditionCode ccode, const char* guide)
 {
   int mask;
   int mask3 = 0;
@@ -67,7 +65,7 @@
   }
   mask = (mask3 << 3) | (mask2 << 2) | (mask1 << 1) |
        (1 << (3 - strlen(guide)));
-  return NewLIR2(cu, kThumb2It, code, mask);
+  return NewLIR2(kThumb2It, code, mask);
 }
 
 /*
@@ -85,52 +83,51 @@
  *     neg   rX
  * done:
  */
-void ArmCodegen::GenCmpLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+void ArmMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2)
 {
   LIR* target1;
   LIR* target2;
-  rl_src1 = LoadValueWide(cu, rl_src1, kCoreReg);
-  rl_src2 = LoadValueWide(cu, rl_src2, kCoreReg);
-  int t_reg = AllocTemp(cu);
-  LoadConstant(cu, t_reg, -1);
-  OpRegReg(cu, kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
-  LIR* branch1 = OpCondBranch(cu, kCondLt, NULL);
-  LIR* branch2 = OpCondBranch(cu, kCondGt, NULL);
-  OpRegRegReg(cu, kOpSub, t_reg, rl_src1.low_reg, rl_src2.low_reg);
-  LIR* branch3 = OpCondBranch(cu, kCondEq, NULL);
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+  int t_reg = AllocTemp();
+  LoadConstant(t_reg, -1);
+  OpRegReg(kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
+  LIR* branch1 = OpCondBranch(kCondLt, NULL);
+  LIR* branch2 = OpCondBranch(kCondGt, NULL);
+  OpRegRegReg(kOpSub, t_reg, rl_src1.low_reg, rl_src2.low_reg);
+  LIR* branch3 = OpCondBranch(kCondEq, NULL);
 
-  OpIT(cu, kCondHi, "E");
-  NewLIR2(cu, kThumb2MovImmShift, t_reg, ModifiedImmediate(-1));
-  LoadConstant(cu, t_reg, 1);
-  GenBarrier(cu);
+  OpIT(kCondHi, "E");
+  NewLIR2(kThumb2MovImmShift, t_reg, ModifiedImmediate(-1));
+  LoadConstant(t_reg, 1);
+  GenBarrier();
 
-  target2 = NewLIR0(cu, kPseudoTargetLabel);
-  OpRegReg(cu, kOpNeg, t_reg, t_reg);
+  target2 = NewLIR0(kPseudoTargetLabel);
+  OpRegReg(kOpNeg, t_reg, t_reg);
 
-  target1 = NewLIR0(cu, kPseudoTargetLabel);
+  target1 = NewLIR0(kPseudoTargetLabel);
 
   RegLocation rl_temp = LocCReturn(); // Just using as template, will change
   rl_temp.low_reg = t_reg;
-  StoreValue(cu, rl_dest, rl_temp);
-  FreeTemp(cu, t_reg);
+  StoreValue(rl_dest, rl_temp);
+  FreeTemp(t_reg);
 
   branch1->target = target1;
   branch2->target = target2;
   branch3->target = branch1->target;
 }
 
-void ArmCodegen::GenFusedLongCmpImmBranch(CompilationUnit* cu, BasicBlock* bb, RegLocation rl_src1,
+void ArmMir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
                                           int64_t val, ConditionCode ccode)
 {
   int32_t val_lo = Low32Bits(val);
   int32_t val_hi = High32Bits(val);
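+  // ModifiedImmediate() returns a negative value when a constant has no
+  // Thumb2 modified-immediate encoding; callers guarantee both halves encode.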
   DCHECK(ModifiedImmediate(val_lo) >= 0);
   DCHECK(ModifiedImmediate(val_hi) >= 0);
-  LIR* label_list = cu->block_label_list;
-  LIR* taken = &label_list[bb->taken->id];
-  LIR* not_taken = &label_list[bb->fall_through->id];
-  rl_src1 = LoadValueWide(cu, rl_src1, kCoreReg);
+  LIR* taken = &block_label_list_[bb->taken->id];
+  LIR* not_taken = &block_label_list_[bb->fall_through->id];
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
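+  // The high words are compared with signed conditions; when they tie, the
+  // cases below rewrite ccode to an unsigned condition (Cc/Ls/Hi/Cs) for the
+  // final low-word compare.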
   int32_t low_reg = rl_src1.low_reg;
   int32_t high_reg = rl_src1.high_reg;
 
@@ -147,51 +144,51 @@
         condition = kCondNe;
       }
       if (val == 0) {
-        int t_reg = AllocTemp(cu);
-        NewLIR4(cu, kThumb2OrrRRRs, t_reg, low_reg, high_reg, 0);
-        FreeTemp(cu, t_reg);
-        OpCondBranch(cu, condition, taken);
+        int t_reg = AllocTemp();
+        NewLIR4(kThumb2OrrRRRs, t_reg, low_reg, high_reg, 0);
+        FreeTemp(t_reg);
+        OpCondBranch(condition, taken);
         return;
       }
-      OpCmpImmBranch(cu, kCondNe, high_reg, val_hi, target);
+      OpCmpImmBranch(kCondNe, high_reg, val_hi, target);
       break;
     case kCondLt:
-      OpCmpImmBranch(cu, kCondLt, high_reg, val_hi, taken);
-      OpCmpImmBranch(cu, kCondGt, high_reg, val_hi, not_taken);
+      OpCmpImmBranch(kCondLt, high_reg, val_hi, taken);
+      OpCmpImmBranch(kCondGt, high_reg, val_hi, not_taken);
       ccode = kCondCc;
       break;
     case kCondLe:
-      OpCmpImmBranch(cu, kCondLt, high_reg, val_hi, taken);
-      OpCmpImmBranch(cu, kCondGt, high_reg, val_hi, not_taken);
+      OpCmpImmBranch(kCondLt, high_reg, val_hi, taken);
+      OpCmpImmBranch(kCondGt, high_reg, val_hi, not_taken);
       ccode = kCondLs;
       break;
     case kCondGt:
-      OpCmpImmBranch(cu, kCondGt, high_reg, val_hi, taken);
-      OpCmpImmBranch(cu, kCondLt, high_reg, val_hi, not_taken);
+      OpCmpImmBranch(kCondGt, high_reg, val_hi, taken);
+      OpCmpImmBranch(kCondLt, high_reg, val_hi, not_taken);
       ccode = kCondHi;
       break;
     case kCondGe:
-      OpCmpImmBranch(cu, kCondGt, high_reg, val_hi, taken);
-      OpCmpImmBranch(cu, kCondLt, high_reg, val_hi, not_taken);
+      OpCmpImmBranch(kCondGt, high_reg, val_hi, taken);
+      OpCmpImmBranch(kCondLt, high_reg, val_hi, not_taken);
       ccode = kCondCs;
       break;
     default:
       LOG(FATAL) << "Unexpected ccode: " << ccode;
   }
-  OpCmpImmBranch(cu, ccode, low_reg, val_lo, taken);
+  OpCmpImmBranch(ccode, low_reg, val_lo, taken);
 }
 
-void ArmCodegen::GenSelect(CompilationUnit* cu, BasicBlock* bb, MIR* mir)
+void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir)
 {
   RegLocation rl_result;
-  RegLocation rl_src = GetSrc(cu, mir, 0);
+  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
   // Temporary debugging code
   int dest_sreg = mir->ssa_rep->defs[0];
-  if ((dest_sreg < 0) || (dest_sreg >= cu->mir_graph->GetNumSSARegs())) {
+  if ((dest_sreg < 0) || (dest_sreg >= mir_graph_->GetNumSSARegs())) {
     LOG(INFO) << "Bad target sreg: " << dest_sreg << ", in "
-              << PrettyMethod(cu->method_idx,*cu->dex_file);
+              << PrettyMethod(cu_->method_idx, *cu_->dex_file);
     LOG(INFO) << "at dex offset 0x" << std::hex << mir->offset;
-    LOG(INFO) << "vreg = " << cu->mir_graph->SRegToVReg(dest_sreg);
+    LOG(INFO) << "vreg = " << mir_graph_->SRegToVReg(dest_sreg);
     LOG(INFO) << "num uses = " << mir->ssa_rep->num_uses;
     if (mir->ssa_rep->num_uses == 1) {
       LOG(INFO) << "CONST case, vals = " << mir->dalvikInsn.vB << ", " << mir->dalvikInsn.vC;
@@ -202,58 +199,58 @@
     CHECK(false) << "Invalid target sreg on Select.";
   }
   // End temporary debugging code
-  RegLocation rl_dest = GetDest(cu, mir);
-  rl_src = LoadValue(cu, rl_src, kCoreReg);
+  RegLocation rl_dest = mir_graph_->GetDest(mir);
+  rl_src = LoadValue(rl_src, kCoreReg);
   if (mir->ssa_rep->num_uses == 1) {
     // CONST case
     int true_val = mir->dalvikInsn.vB;
     int false_val = mir->dalvikInsn.vC;
-    rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+    rl_result = EvalLoc(rl_dest, kCoreReg, true);
     if ((true_val == 1) && (false_val == 0)) {
-      OpRegRegImm(cu, kOpRsub, rl_result.low_reg, rl_src.low_reg, 1);
-      OpIT(cu, kCondCc, "");
-      LoadConstant(cu, rl_result.low_reg, 0);
-      GenBarrier(cu); // Add a scheduling barrier to keep the IT shadow intact
+      OpRegRegImm(kOpRsub, rl_result.low_reg, rl_src.low_reg, 1);
+      OpIT(kCondCc, "");
+      LoadConstant(rl_result.low_reg, 0);
+      GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
     } else if (InexpensiveConstantInt(true_val) && InexpensiveConstantInt(false_val)) {
-      OpRegImm(cu, kOpCmp, rl_src.low_reg, 0);
-      OpIT(cu, kCondEq, "E");
-      LoadConstant(cu, rl_result.low_reg, true_val);
-      LoadConstant(cu, rl_result.low_reg, false_val);
-      GenBarrier(cu); // Add a scheduling barrier to keep the IT shadow intact
+      OpRegImm(kOpCmp, rl_src.low_reg, 0);
+      OpIT(kCondEq, "E");
+      LoadConstant(rl_result.low_reg, true_val);
+      LoadConstant(rl_result.low_reg, false_val);
+      GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
     } else {
       // Unlikely case - could be tuned.
-      int t_reg1 = AllocTemp(cu);
-      int t_reg2 = AllocTemp(cu);
-      LoadConstant(cu, t_reg1, true_val);
-      LoadConstant(cu, t_reg2, false_val);
-      OpRegImm(cu, kOpCmp, rl_src.low_reg, 0);
-      OpIT(cu, kCondEq, "E");
-      OpRegCopy(cu, rl_result.low_reg, t_reg1);
-      OpRegCopy(cu, rl_result.low_reg, t_reg2);
-      GenBarrier(cu); // Add a scheduling barrier to keep the IT shadow intact
+      int t_reg1 = AllocTemp();
+      int t_reg2 = AllocTemp();
+      LoadConstant(t_reg1, true_val);
+      LoadConstant(t_reg2, false_val);
+      OpRegImm(kOpCmp, rl_src.low_reg, 0);
+      OpIT(kCondEq, "E");
+      OpRegCopy(rl_result.low_reg, t_reg1);
+      OpRegCopy(rl_result.low_reg, t_reg2);
+      GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
     }
   } else {
     // MOVE case
-    RegLocation rl_true = cu->reg_location[mir->ssa_rep->uses[1]];
-    RegLocation rl_false = cu->reg_location[mir->ssa_rep->uses[2]];
-    rl_true = LoadValue(cu, rl_true, kCoreReg);
-    rl_false = LoadValue(cu, rl_false, kCoreReg);
-    rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-    OpRegImm(cu, kOpCmp, rl_src.low_reg, 0);
-    OpIT(cu, kCondEq, "E");
-    LIR* l1 = OpRegCopy(cu, rl_result.low_reg, rl_true.low_reg);
+    RegLocation rl_true = mir_graph_->reg_location_[mir->ssa_rep->uses[1]];
+    RegLocation rl_false = mir_graph_->reg_location_[mir->ssa_rep->uses[2]];
+    rl_true = LoadValue(rl_true, kCoreReg);
+    rl_false = LoadValue(rl_false, kCoreReg);
+    rl_result = EvalLoc(rl_dest, kCoreReg, true);
+    OpRegImm(kOpCmp, rl_src.low_reg, 0);
+    OpIT(kCondEq, "E");
+    LIR* l1 = OpRegCopy(rl_result.low_reg, rl_true.low_reg);
     l1->flags.is_nop = false;  // Make sure this instruction isn't optimized away
-    LIR* l2 = OpRegCopy(cu, rl_result.low_reg, rl_false.low_reg);
+    LIR* l2 = OpRegCopy(rl_result.low_reg, rl_false.low_reg);
     l2->flags.is_nop = false;  // Make sure this instruction isn't optimized away
-    GenBarrier(cu); // Add a scheduling barrier to keep the IT shadow intact
+    GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
   }
-  StoreValue(cu, rl_dest, rl_result);
+  StoreValue(rl_dest, rl_result);
 }
 
-void ArmCodegen::GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir)
+void ArmMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir)
 {
-  RegLocation rl_src1 = GetSrcWide(cu, mir, 0);
-  RegLocation rl_src2 = GetSrcWide(cu, mir, 2);
+  RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
+  RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
   // Normalize such that if either operand is constant, src2 will be constant.
   ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
   if (rl_src1.is_const) {
@@ -263,60 +260,59 @@
     ccode = FlipComparisonOrder(ccode);
   }
   if (rl_src2.is_const) {
-    RegLocation rl_temp = UpdateLocWide(cu, rl_src2);
+    RegLocation rl_temp = UpdateLocWide(rl_src2);
     // Do special compare/branch against simple const operand if not already in registers.
-    int64_t val = cu->mir_graph->ConstantValueWide(rl_src2);
+    int64_t val = mir_graph_->ConstantValueWide(rl_src2);
     if ((rl_temp.location != kLocPhysReg) &&
         ((ModifiedImmediate(Low32Bits(val)) >= 0) && (ModifiedImmediate(High32Bits(val)) >= 0))) {
-      GenFusedLongCmpImmBranch(cu, bb, rl_src1, val, ccode);
+      GenFusedLongCmpImmBranch(bb, rl_src1, val, ccode);
       return;
     }
   }
-  LIR* label_list = cu->block_label_list;
-  LIR* taken = &label_list[bb->taken->id];
-  LIR* not_taken = &label_list[bb->fall_through->id];
-  rl_src1 = LoadValueWide(cu, rl_src1, kCoreReg);
-  rl_src2 = LoadValueWide(cu, rl_src2, kCoreReg);
-  OpRegReg(cu, kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
+  LIR* taken = &block_label_list_[bb->taken->id];
+  LIR* not_taken = &block_label_list_[bb->fall_through->id];
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+  OpRegReg(kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
   switch (ccode) {
     case kCondEq:
-      OpCondBranch(cu, kCondNe, not_taken);
+      OpCondBranch(kCondNe, not_taken);
       break;
     case kCondNe:
-      OpCondBranch(cu, kCondNe, taken);
+      OpCondBranch(kCondNe, taken);
       break;
     case kCondLt:
-      OpCondBranch(cu, kCondLt, taken);
-      OpCondBranch(cu, kCondGt, not_taken);
+      OpCondBranch(kCondLt, taken);
+      OpCondBranch(kCondGt, not_taken);
       ccode = kCondCc;
       break;
     case kCondLe:
-      OpCondBranch(cu, kCondLt, taken);
-      OpCondBranch(cu, kCondGt, not_taken);
+      OpCondBranch(kCondLt, taken);
+      OpCondBranch(kCondGt, not_taken);
       ccode = kCondLs;
       break;
     case kCondGt:
-      OpCondBranch(cu, kCondGt, taken);
-      OpCondBranch(cu, kCondLt, not_taken);
+      OpCondBranch(kCondGt, taken);
+      OpCondBranch(kCondLt, not_taken);
       ccode = kCondHi;
       break;
     case kCondGe:
-      OpCondBranch(cu, kCondGt, taken);
-      OpCondBranch(cu, kCondLt, not_taken);
+      OpCondBranch(kCondGt, taken);
+      OpCondBranch(kCondLt, not_taken);
       ccode = kCondCs;
       break;
     default:
       LOG(FATAL) << "Unexpected ccode: " << ccode;
   }
-  OpRegReg(cu, kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
-  OpCondBranch(cu, ccode, taken);
+  OpRegReg(kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
+  OpCondBranch(ccode, taken);
 }
 
 /*
  * Generate a register comparison to an immediate and branch.  Caller
  * is responsible for setting branch target field.
  */
-LIR* ArmCodegen::OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg, int check_value,
+LIR* ArmMir2Lir::OpCmpImmBranch(ConditionCode cond, int reg, int check_value,
                                 LIR* target)
 {
   LIR* branch;
@@ -324,31 +320,31 @@
   ArmConditionCode arm_cond = ArmConditionEncoding(cond);
   if ((ARM_LOWREG(reg)) && (check_value == 0) &&
      ((arm_cond == kArmCondEq) || (arm_cond == kArmCondNe))) {
-    branch = NewLIR2(cu, (arm_cond == kArmCondEq) ? kThumb2Cbz : kThumb2Cbnz,
+    branch = NewLIR2((arm_cond == kArmCondEq) ? kThumb2Cbz : kThumb2Cbnz,
                      reg, 0);
   } else {
     mod_imm = ModifiedImmediate(check_value);
     if (ARM_LOWREG(reg) && ((check_value & 0xff) == check_value)) {
-      NewLIR2(cu, kThumbCmpRI8, reg, check_value);
+      NewLIR2(kThumbCmpRI8, reg, check_value);
     } else if (mod_imm >= 0) {
-      NewLIR2(cu, kThumb2CmpRI12, reg, mod_imm);
+      NewLIR2(kThumb2CmpRI12, reg, mod_imm);
     } else {
-      int t_reg = AllocTemp(cu);
-      LoadConstant(cu, t_reg, check_value);
-      OpRegReg(cu, kOpCmp, reg, t_reg);
+      int t_reg = AllocTemp();
+      LoadConstant(t_reg, check_value);
+      OpRegReg(kOpCmp, reg, t_reg);
     }
-    branch = NewLIR2(cu, kThumbBCond, 0, arm_cond);
+    branch = NewLIR2(kThumbBCond, 0, arm_cond);
   }
   branch->target = target;
   return branch;
 }
 
-LIR* ArmCodegen::OpRegCopyNoInsert(CompilationUnit* cu, int r_dest, int r_src)
+LIR* ArmMir2Lir::OpRegCopyNoInsert(int r_dest, int r_src)
 {
   LIR* res;
   int opcode;
   if (ARM_FPREG(r_dest) || ARM_FPREG(r_src))
-    return OpFpRegCopy(cu, r_dest, r_src);
+    return OpFpRegCopy(r_dest, r_src);
   if (ARM_LOWREG(r_dest) && ARM_LOWREG(r_src))
     opcode = kThumbMovRR;
   else if (!ARM_LOWREG(r_dest) && !ARM_LOWREG(r_src))
@@ -357,21 +353,21 @@
      opcode = kThumbMovRR_H2L;
   else
      opcode = kThumbMovRR_L2H;
-  res = RawLIR(cu, cu->current_dalvik_offset, opcode, r_dest, r_src);
-  if (!(cu->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
+  res = RawLIR(current_dalvik_offset_, opcode, r_dest, r_src);
+  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
     res->flags.is_nop = true;
   }
   return res;
 }
 
-LIR* ArmCodegen::OpRegCopy(CompilationUnit* cu, int r_dest, int r_src)
+LIR* ArmMir2Lir::OpRegCopy(int r_dest, int r_src)
 {
-  LIR* res = OpRegCopyNoInsert(cu, r_dest, r_src);
-  AppendLIR(cu, res);
+  LIR* res = OpRegCopyNoInsert(r_dest, r_src);
+  AppendLIR(res);
   return res;
 }
 
-void ArmCodegen::OpRegCopyWide(CompilationUnit* cu, int dest_lo, int dest_hi, int src_lo,
+void ArmMir2Lir::OpRegCopyWide(int dest_lo, int dest_hi, int src_lo,
                                int src_hi)
 {
   bool dest_fp = ARM_FPREG(dest_lo) && ARM_FPREG(dest_hi);
@@ -380,21 +376,21 @@
   DCHECK_EQ(ARM_FPREG(dest_lo), ARM_FPREG(dest_hi));
   if (dest_fp) {
     if (src_fp) {
-      OpRegCopy(cu, S2d(dest_lo, dest_hi), S2d(src_lo, src_hi));
+      OpRegCopy(S2d(dest_lo, dest_hi), S2d(src_lo, src_hi));
     } else {
-      NewLIR3(cu, kThumb2Fmdrr, S2d(dest_lo, dest_hi), src_lo, src_hi);
+      NewLIR3(kThumb2Fmdrr, S2d(dest_lo, dest_hi), src_lo, src_hi);
     }
   } else {
     if (src_fp) {
-      NewLIR3(cu, kThumb2Fmrrd, dest_lo, dest_hi, S2d(src_lo, src_hi));
+      NewLIR3(kThumb2Fmrrd, dest_lo, dest_hi, S2d(src_lo, src_hi));
     } else {
       // Handle overlap
       if (src_hi == dest_lo) {
-        OpRegCopy(cu, dest_hi, src_hi);
-        OpRegCopy(cu, dest_lo, src_lo);
+        OpRegCopy(dest_hi, src_hi);
+        OpRegCopy(dest_lo, src_lo);
       } else {
-        OpRegCopy(cu, dest_lo, src_lo);
-        OpRegCopy(cu, dest_hi, src_hi);
+        OpRegCopy(dest_lo, src_lo);
+        OpRegCopy(dest_hi, src_hi);
       }
     }
   }
@@ -427,7 +423,7 @@
 };
 
 // Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
-bool ArmCodegen::SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
+bool ArmMir2Lir::SmallLiteralDivide(Instruction::Code dalvik_opcode,
                                     RegLocation rl_src, RegLocation rl_dest, int lit)
 {
   if ((lit < 0) || (lit >= static_cast<int>(sizeof(magic_table)/sizeof(magic_table[0])))) {
@@ -442,122 +438,122 @@
     return false;
   }
 
-  int r_magic = AllocTemp(cu);
-  LoadConstant(cu, r_magic, magic_table[lit].magic);
-  rl_src = LoadValue(cu, rl_src, kCoreReg);
-  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-  int r_hi = AllocTemp(cu);
-  int r_lo = AllocTemp(cu);
-  NewLIR4(cu, kThumb2Smull, r_lo, r_hi, r_magic, rl_src.low_reg);
+  int r_magic = AllocTemp();
+  LoadConstant(r_magic, magic_table[lit].magic);
+  rl_src = LoadValue(rl_src, kCoreReg);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  int r_hi = AllocTemp();
+  int r_lo = AllocTemp();
+  NewLIR4(kThumb2Smull, r_lo, r_hi, r_magic, rl_src.low_reg);
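+  // smull leaves the 64-bit product of the magic constant and the dividend in
+  // r_hi:r_lo; the patterns below apply the shift/correction steps from
+  // Hacker's Delight, 10-4.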
   switch (pattern) {
     case Divide3:
-      OpRegRegRegShift(cu, kOpSub, rl_result.low_reg, r_hi,
+      OpRegRegRegShift(kOpSub, rl_result.low_reg, r_hi,
                rl_src.low_reg, EncodeShift(kArmAsr, 31));
       break;
     case Divide5:
-      OpRegRegImm(cu, kOpAsr, r_lo, rl_src.low_reg, 31);
-      OpRegRegRegShift(cu, kOpRsub, rl_result.low_reg, r_lo, r_hi,
+      OpRegRegImm(kOpAsr, r_lo, rl_src.low_reg, 31);
+      OpRegRegRegShift(kOpRsub, rl_result.low_reg, r_lo, r_hi,
                EncodeShift(kArmAsr, magic_table[lit].shift));
       break;
     case Divide7:
-      OpRegReg(cu, kOpAdd, r_hi, rl_src.low_reg);
-      OpRegRegImm(cu, kOpAsr, r_lo, rl_src.low_reg, 31);
-      OpRegRegRegShift(cu, kOpRsub, rl_result.low_reg, r_lo, r_hi,
+      OpRegReg(kOpAdd, r_hi, rl_src.low_reg);
+      OpRegRegImm(kOpAsr, r_lo, rl_src.low_reg, 31);
+      OpRegRegRegShift(kOpRsub, rl_result.low_reg, r_lo, r_hi,
                EncodeShift(kArmAsr, magic_table[lit].shift));
       break;
     default:
       LOG(FATAL) << "Unexpected pattern: " << pattern;
   }
-  StoreValue(cu, rl_dest, rl_result);
+  StoreValue(rl_dest, rl_result);
   return true;
 }
 
-LIR* ArmCodegen::GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code,
+LIR* ArmMir2Lir::GenRegMemCheck(ConditionCode c_code,
                     int reg1, int base, int offset, ThrowKind kind)
 {
   LOG(FATAL) << "Unexpected use of GenRegMemCheck for Arm";
   return NULL;
 }
 
-RegLocation ArmCodegen::GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg1, int lit,
+RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, int reg1, int lit,
                                      bool is_div)
 {
   LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm";
   return rl_dest;
 }
 
-RegLocation ArmCodegen::GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg1, int reg2,
+RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, int reg1, int reg2,
                                   bool is_div)
 {
   LOG(FATAL) << "Unexpected use of GenDivRem for Arm";
   return rl_dest;
 }
 
-bool ArmCodegen::GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min)
+bool ArmMir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min)
 {
-  DCHECK_EQ(cu->instruction_set, kThumb2);
+  DCHECK_EQ(cu_->instruction_set, kThumb2);
   RegLocation rl_src1 = info->args[0];
   RegLocation rl_src2 = info->args[1];
-  rl_src1 = LoadValue(cu, rl_src1, kCoreReg);
-  rl_src2 = LoadValue(cu, rl_src2, kCoreReg);
-  RegLocation rl_dest = InlineTarget(cu, info);
-  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-  OpRegReg(cu, kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
-  OpIT(cu, (is_min) ? kCondGt : kCondLt, "E");
-  OpRegReg(cu, kOpMov, rl_result.low_reg, rl_src2.low_reg);
-  OpRegReg(cu, kOpMov, rl_result.low_reg, rl_src1.low_reg);
-  GenBarrier(cu);
-  StoreValue(cu, rl_dest, rl_result);
+  rl_src1 = LoadValue(rl_src1, kCoreReg);
+  rl_src2 = LoadValue(rl_src2, kCoreReg);
+  RegLocation rl_dest = InlineTarget(info);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  OpRegReg(kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
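+  // "E" encodes a then/else IT block: the first mov executes when the compare
+  // favors src2, the second otherwise, yielding a branch-free min/max.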
+  OpIT((is_min) ? kCondGt : kCondLt, "E");
+  OpRegReg(kOpMov, rl_result.low_reg, rl_src2.low_reg);
+  OpRegReg(kOpMov, rl_result.low_reg, rl_src1.low_reg);
+  GenBarrier();
+  StoreValue(rl_dest, rl_result);
   return true;
 }
 
-void ArmCodegen::OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale, int offset)
+void ArmMir2Lir::OpLea(int rBase, int reg1, int reg2, int scale, int offset)
 {
   LOG(FATAL) << "Unexpected use of OpLea for Arm";
 }
 
-void ArmCodegen::OpTlsCmp(CompilationUnit* cu, int offset, int val)
+void ArmMir2Lir::OpTlsCmp(int offset, int val)
 {
   LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm";
 }
 
-bool ArmCodegen::GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier) {
-  DCHECK_EQ(cu->instruction_set, kThumb2);
+bool ArmMir2Lir::GenInlinedCas32(CallInfo* info, bool need_write_barrier) {
+  DCHECK_EQ(cu_->instruction_set, kThumb2);
   // Unused - RegLocation rl_src_unsafe = info->args[0];
   RegLocation rl_src_obj = info->args[1];  // Object - known non-null
   RegLocation rl_src_offset = info->args[2];  // long low
   rl_src_offset.wide = 0;  // ignore high half in info->args[3]
   RegLocation rl_src_expected = info->args[4];  // int or Object
   RegLocation rl_src_new_value = info->args[5];  // int or Object
-  RegLocation rl_dest = InlineTarget(cu, info);  // boolean place for result
+  RegLocation rl_dest = InlineTarget(info);  // boolean place for result
 
 
   // Release store semantics; get the barrier out of the way.  TODO: revisit.
-  GenMemBarrier(cu, kStoreLoad);
+  GenMemBarrier(kStoreLoad);
 
-  RegLocation rl_object = LoadValue(cu, rl_src_obj, kCoreReg);
-  RegLocation rl_new_value = LoadValue(cu, rl_src_new_value, kCoreReg);
+  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
+  RegLocation rl_new_value = LoadValue(rl_src_new_value, kCoreReg);
 
-  if (need_write_barrier && !cu->mir_graph->IsConstantNullRef(rl_new_value)) {
+  if (need_write_barrier && !mir_graph_->IsConstantNullRef(rl_new_value)) {
     // Mark card for object assuming new value is stored.
-    MarkGCCard(cu, rl_new_value.low_reg, rl_object.low_reg);
+    MarkGCCard(rl_new_value.low_reg, rl_object.low_reg);
   }
 
-  RegLocation rl_offset = LoadValue(cu, rl_src_offset, kCoreReg);
+  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
 
-  int r_ptr = AllocTemp(cu);
-  OpRegRegReg(cu, kOpAdd, r_ptr, rl_object.low_reg, rl_offset.low_reg);
+  int r_ptr = AllocTemp();
+  OpRegRegReg(kOpAdd, r_ptr, rl_object.low_reg, rl_offset.low_reg);
 
   // Free now unneeded rl_object and rl_offset to give more temps.
-  ClobberSReg(cu, rl_object.s_reg_low);
-  FreeTemp(cu, rl_object.low_reg);
-  ClobberSReg(cu, rl_offset.s_reg_low);
-  FreeTemp(cu, rl_offset.low_reg);
+  ClobberSReg(rl_object.s_reg_low);
+  FreeTemp(rl_object.low_reg);
+  ClobberSReg(rl_offset.s_reg_low);
+  FreeTemp(rl_offset.low_reg);
 
-  int r_old_value = AllocTemp(cu);
-  NewLIR3(cu, kThumb2Ldrex, r_old_value, r_ptr, 0);  // r_old_value := [r_ptr]
+  int r_old_value = AllocTemp();
+  NewLIR3(kThumb2Ldrex, r_old_value, r_ptr, 0);  // r_old_value := [r_ptr]
 
-  RegLocation rl_expected = LoadValue(cu, rl_src_expected, kCoreReg);
+  RegLocation rl_expected = LoadValue(rl_src_expected, kCoreReg);
 
   // if (r_old_value == rExpected) {
   //   [r_ptr] <- r_new_value && r_result := success ? 0 : 1
@@ -565,70 +561,70 @@
   // } else {
   //   r_result := 0
   // }
-  OpRegReg(cu, kOpCmp, r_old_value, rl_expected.low_reg);
-  FreeTemp(cu, r_old_value);  // Now unneeded.
-  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-  OpIT(cu, kCondEq, "TE");
-  NewLIR4(cu, kThumb2Strex, rl_result.low_reg, rl_new_value.low_reg, r_ptr, 0);
-  FreeTemp(cu, r_ptr);  // Now unneeded.
-  OpRegImm(cu, kOpXor, rl_result.low_reg, 1);
-  OpRegReg(cu, kOpXor, rl_result.low_reg, rl_result.low_reg);
+  OpRegReg(kOpCmp, r_old_value, rl_expected.low_reg);
+  FreeTemp(r_old_value);  // Now unneeded.
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  OpIT(kCondEq, "TE");
+  NewLIR4(kThumb2Strex, rl_result.low_reg, rl_new_value.low_reg, r_ptr, 0);
+  FreeTemp(r_ptr);  // Now unneeded.
+  OpRegImm(kOpXor, rl_result.low_reg, 1);
+  OpRegReg(kOpXor, rl_result.low_reg, rl_result.low_reg);
 
-  StoreValue(cu, rl_dest, rl_result);
+  StoreValue(rl_dest, rl_result);
 
   return true;
 }
 
-LIR* ArmCodegen::OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target)
+LIR* ArmMir2Lir::OpPcRelLoad(int reg, LIR* target)
 {
-  return RawLIR(cu, cu->current_dalvik_offset, kThumb2LdrPcRel12, reg, 0, 0, 0, 0, target);
+  return RawLIR(current_dalvik_offset_, kThumb2LdrPcRel12, reg, 0, 0, 0, 0, target);
 }
 
-LIR* ArmCodegen::OpVldm(CompilationUnit* cu, int rBase, int count)
+LIR* ArmMir2Lir::OpVldm(int rBase, int count)
 {
-  return NewLIR3(cu, kThumb2Vldms, rBase, fr0, count);
+  return NewLIR3(kThumb2Vldms, rBase, fr0, count);
 }
 
-LIR* ArmCodegen::OpVstm(CompilationUnit* cu, int rBase, int count)
+LIR* ArmMir2Lir::OpVstm(int rBase, int count)
 {
-  return NewLIR3(cu, kThumb2Vstms, rBase, fr0, count);
+  return NewLIR3(kThumb2Vstms, rBase, fr0, count);
 }
 
-void ArmCodegen::GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
+void ArmMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                                RegLocation rl_result, int lit,
                                                int first_bit, int second_bit)
 {
-  OpRegRegRegShift(cu, kOpAdd, rl_result.low_reg, rl_src.low_reg, rl_src.low_reg,
+  OpRegRegRegShift(kOpAdd, rl_result.low_reg, rl_src.low_reg, rl_src.low_reg,
                    EncodeShift(kArmLsl, second_bit - first_bit));
   if (first_bit != 0) {
-    OpRegRegImm(cu, kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
+    OpRegRegImm(kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
   }
 }
 
-void ArmCodegen::GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi)
+void ArmMir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi)
 {
-  int t_reg = AllocTemp(cu);
-  NewLIR4(cu, kThumb2OrrRRRs, t_reg, reg_lo, reg_hi, 0);
-  FreeTemp(cu, t_reg);
-  GenCheck(cu, kCondEq, kThrowDivZero);
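+  // orrs of the two halves sets Z exactly when the full 64-bit divisor is
+  // zero, so one flag check covers both words.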
+  int t_reg = AllocTemp();
+  NewLIR4(kThumb2OrrRRRs, t_reg, reg_lo, reg_hi, 0);
+  FreeTemp(t_reg);
+  GenCheck(kCondEq, kThrowDivZero);
 }
 
 // Test suspend flag, return target of taken suspend branch
-LIR* ArmCodegen::OpTestSuspend(CompilationUnit* cu, LIR* target)
+LIR* ArmMir2Lir::OpTestSuspend(LIR* target)
 {
-  NewLIR2(cu, kThumbSubRI8, rARM_SUSPEND, 1);
-  return OpCondBranch(cu, (target == NULL) ? kCondEq : kCondNe, target);
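+  // Subtracts 1 from the suspend counter with flags set; EQ means the counter
+  // reached zero and a suspend check is due.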
+  NewLIR2(kThumbSubRI8, rARM_SUSPEND, 1);
+  return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
 }
 
 // Decrement register and branch on condition
-LIR* ArmCodegen::OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg, LIR* target)
+LIR* ArmMir2Lir::OpDecAndBranch(ConditionCode c_code, int reg, LIR* target)
 {
   // Combine sub & test using sub setflags encoding here
-  NewLIR3(cu, kThumb2SubsRRI12, reg, reg, 1);
-  return OpCondBranch(cu, c_code, target);
+  NewLIR3(kThumb2SubsRRI12, reg, reg, 1);
+  return OpCondBranch(c_code, target);
 }
 
-void ArmCodegen::GenMemBarrier(CompilationUnit* cu, MemBarrierKind barrier_kind)
+void ArmMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind)
 {
 #if ANDROID_SMP != 0
   int dmb_flavor;
@@ -643,29 +639,29 @@
       dmb_flavor = kSY;  // quiet gcc.
       break;
   }
-  LIR* dmb = NewLIR1(cu, kThumb2Dmb, dmb_flavor);
+  LIR* dmb = NewLIR1(kThumb2Dmb, dmb_flavor);
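+  // Defining all resources keeps later LIR passes from reordering memory
+  // accesses across the dmb.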
   dmb->def_mask = ENCODE_ALL;
 #endif
 }
 
-void ArmCodegen::GenNegLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src)
 {
-  rl_src = LoadValueWide(cu, rl_src, kCoreReg);
-  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-  int z_reg = AllocTemp(cu);
-  LoadConstantNoClobber(cu, z_reg, 0);
+  rl_src = LoadValueWide(rl_src, kCoreReg);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  int z_reg = AllocTemp();
+  LoadConstantNoClobber(z_reg, 0);
   // Check for destructive overlap
   if (rl_result.low_reg == rl_src.high_reg) {
-    int t_reg = AllocTemp(cu);
-    OpRegRegReg(cu, kOpSub, rl_result.low_reg, z_reg, rl_src.low_reg);
-    OpRegRegReg(cu, kOpSbc, rl_result.high_reg, z_reg, t_reg);
-    FreeTemp(cu, t_reg);
+    int t_reg = AllocTemp();
+    OpRegRegReg(kOpSub, rl_result.low_reg, z_reg, rl_src.low_reg);
+    OpRegRegReg(kOpSbc, rl_result.high_reg, z_reg, t_reg);
+    FreeTemp(t_reg);
   } else {
-    OpRegRegReg(cu, kOpSub, rl_result.low_reg, z_reg, rl_src.low_reg);
-    OpRegRegReg(cu, kOpSbc, rl_result.high_reg, z_reg, rl_src.high_reg);
+    OpRegRegReg(kOpSub, rl_result.low_reg, z_reg, rl_src.low_reg);
+    OpRegRegReg(kOpSbc, rl_result.high_reg, z_reg, rl_src.high_reg);
   }
-  FreeTemp(cu, z_reg);
-  StoreValueWide(cu, rl_dest, rl_result);
+  FreeTemp(z_reg);
+  StoreValueWide(rl_dest, rl_result);
 }
 
 
@@ -674,14 +670,14 @@
   * is not usual for dx to generate, but it is legal (for now).  In a future rev of
   * dex, we'll want to make this case illegal.
   */
-static bool BadOverlap(CompilationUnit* cu, RegLocation rl_src, RegLocation rl_dest)
+bool ArmMir2Lir::BadOverlap(RegLocation rl_src, RegLocation rl_dest)
 {
   DCHECK(rl_src.wide);
   DCHECK(rl_dest.wide);
-  return (abs(cu->mir_graph->SRegToVReg(rl_src.s_reg_low) - cu->mir_graph->SRegToVReg(rl_dest.s_reg_low)) == 1);
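+  // Adjacent v_regs mean the wide pairs share exactly one register, e.g. a
+  // long in v1/v2 against a long in v2/v3.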
+  return (abs(mir_graph_->SRegToVReg(rl_src.s_reg_low) - mir_graph_->SRegToVReg(rl_dest.s_reg_low)) == 1);
 }
 
-void ArmCodegen::GenMulLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+void ArmMir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2)
 {
     /*
@@ -694,92 +690,92 @@
      * overlap with either operand and send that case to a runtime handler.
      */
     RegLocation rl_result;
-    if (BadOverlap(cu, rl_src1, rl_dest) || (BadOverlap(cu, rl_src2, rl_dest))) {
+    if (BadOverlap(rl_src1, rl_dest) || (BadOverlap(rl_src2, rl_dest))) {
       int func_offset = ENTRYPOINT_OFFSET(pLmul);
-      FlushAllRegs(cu);
-      CallRuntimeHelperRegLocationRegLocation(cu, func_offset, rl_src1, rl_src2, false);
-      rl_result = GetReturnWide(cu, false);
-      StoreValueWide(cu, rl_dest, rl_result);
+      FlushAllRegs();
+      CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
+      rl_result = GetReturnWide(false);
+      StoreValueWide(rl_dest, rl_result);
       return;
     }
     // Temporarily add LR to the temp pool, and assign it to tmp1
-    MarkTemp(cu, rARM_LR);
-    FreeTemp(cu, rARM_LR);
+    MarkTemp(rARM_LR);
+    FreeTemp(rARM_LR);
     int tmp1 = rARM_LR;
-    LockTemp(cu, rARM_LR);
+    LockTemp(rARM_LR);
 
-    rl_src1 = LoadValueWide(cu, rl_src1, kCoreReg);
-    rl_src2 = LoadValueWide(cu, rl_src2, kCoreReg);
+    rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+    rl_src2 = LoadValueWide(rl_src2, kCoreReg);
 
     bool special_case = true;
     // If operands are the same, or any pair has been promoted, we're not the special case.
     if ((rl_src1.s_reg_low == rl_src2.s_reg_low) ||
-        (!IsTemp(cu, rl_src1.low_reg) && !IsTemp(cu, rl_src1.high_reg)) ||
-        (!IsTemp(cu, rl_src2.low_reg) && !IsTemp(cu, rl_src2.high_reg))) {
+        (!IsTemp(rl_src1.low_reg) && !IsTemp(rl_src1.high_reg)) ||
+        (!IsTemp(rl_src2.low_reg) && !IsTemp(rl_src2.high_reg))) {
       special_case = false;
     }
     // Tuning: if rl_dest has been promoted and is *not* either operand, could use directly.
-    int res_lo = AllocTemp(cu);
+    int res_lo = AllocTemp();
     int res_hi;
     if (rl_src1.low_reg == rl_src2.low_reg) {
-      res_hi = AllocTemp(cu);
-      NewLIR3(cu, kThumb2MulRRR, tmp1, rl_src1.low_reg, rl_src1.high_reg);
-      NewLIR4(cu, kThumb2Umull, res_lo, res_hi, rl_src1.low_reg, rl_src1.low_reg);
-      OpRegRegRegShift(cu, kOpAdd, res_hi, res_hi, tmp1, EncodeShift(kArmLsl, 1));
+      res_hi = AllocTemp();
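+      // Squaring: (hi:lo)^2 mod 2^64 = lo*lo + ((lo*hi) << 33), so the cross
+      // term is computed once and added, doubled, into the high result word.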
+      NewLIR3(kThumb2MulRRR, tmp1, rl_src1.low_reg, rl_src1.high_reg);
+      NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src1.low_reg, rl_src1.low_reg);
+      OpRegRegRegShift(kOpAdd, res_hi, res_hi, tmp1, EncodeShift(kArmLsl, 1));
     } else {
       // In the special case, all temps are now allocated
-      NewLIR3(cu, kThumb2MulRRR, tmp1, rl_src2.low_reg, rl_src1.high_reg);
+      NewLIR3(kThumb2MulRRR, tmp1, rl_src2.low_reg, rl_src1.high_reg);
       if (special_case) {
         DCHECK_NE(rl_src1.low_reg, rl_src2.low_reg);
         DCHECK_NE(rl_src1.high_reg, rl_src2.high_reg);
-        FreeTemp(cu, rl_src1.high_reg);
+        FreeTemp(rl_src1.high_reg);
       }
-      res_hi = AllocTemp(cu);
+      res_hi = AllocTemp();
 
-      NewLIR4(cu, kThumb2Umull, res_lo, res_hi, rl_src2.low_reg, rl_src1.low_reg);
-      NewLIR4(cu, kThumb2Mla, tmp1, rl_src1.low_reg, rl_src2.high_reg, tmp1);
-      NewLIR4(cu, kThumb2AddRRR, res_hi, tmp1, res_hi, 0);
+      NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src2.low_reg, rl_src1.low_reg);
+      NewLIR4(kThumb2Mla, tmp1, rl_src1.low_reg, rl_src2.high_reg, tmp1);
+      NewLIR4(kThumb2AddRRR, res_hi, tmp1, res_hi, 0);
       if (special_case) {
-        FreeTemp(cu, rl_src1.low_reg);
-        Clobber(cu, rl_src1.low_reg);
-        Clobber(cu, rl_src1.high_reg);
+        FreeTemp(rl_src1.low_reg);
+        Clobber(rl_src1.low_reg);
+        Clobber(rl_src1.high_reg);
       }
     }
-    FreeTemp(cu, tmp1);
-    rl_result = GetReturnWide(cu, false); // Just using as a template.
+    FreeTemp(tmp1);
+    rl_result = GetReturnWide(false); // Just using as a template.
     rl_result.low_reg = res_lo;
     rl_result.high_reg = res_hi;
-    StoreValueWide(cu, rl_dest, rl_result);
+    StoreValueWide(rl_dest, rl_result);
     // Now, restore lr to its non-temp status.
-    Clobber(cu, rARM_LR);
-    UnmarkTemp(cu, rARM_LR);
+    Clobber(rARM_LR);
+    UnmarkTemp(rARM_LR);
 }
 
-void ArmCodegen::GenAddLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+void ArmMir2Lir::GenAddLong(RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2)
 {
   LOG(FATAL) << "Unexpected use of GenAddLong for Arm";
 }
 
-void ArmCodegen::GenSubLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+void ArmMir2Lir::GenSubLong(RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2)
 {
   LOG(FATAL) << "Unexpected use of GenSubLong for Arm";
 }
 
-void ArmCodegen::GenAndLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+void ArmMir2Lir::GenAndLong(RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2)
 {
   LOG(FATAL) << "Unexpected use of GenAndLong for Arm";
 }
 
-void ArmCodegen::GenOrLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+void ArmMir2Lir::GenOrLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2)
 {
   LOG(FATAL) << "Unexpected use of GenOrLong for Arm";
 }
 
-void ArmCodegen::GenXorLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+void ArmMir2Lir::GenXorLong(RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2)
 {
   LOG(FATAL) << "Unexpected use of genXoLong for Arm";
@@ -788,7 +784,7 @@
 /*
  * Generate array load
  */
-void ArmCodegen::GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                           RegLocation rl_index, RegLocation rl_dest, int scale)
 {
   RegisterClass reg_class = oat_reg_class_by_size(size);
@@ -796,9 +792,9 @@
   int data_offset;
   RegLocation rl_result;
   bool constant_index = rl_index.is_const;
-  rl_array = LoadValue(cu, rl_array, kCoreReg);
+  rl_array = LoadValue(rl_array, kCoreReg);
   if (!constant_index) {
-    rl_index = LoadValue(cu, rl_index, kCoreReg);
+    rl_index = LoadValue(rl_index, kCoreReg);
   }
 
   if (rl_dest.wide) {
@@ -809,18 +805,18 @@
 
   // If index is constant, just fold it into the data offset
   if (constant_index) {
-    data_offset += cu->mir_graph->ConstantValue(rl_index) << scale;
+    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
   }
 
   /* null object? */
-  GenNullCheck(cu, rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+  GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
 
   bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
   int reg_len = INVALID_REG;
   if (needs_range_check) {
-    reg_len = AllocTemp(cu);
+    reg_len = AllocTemp();
     /* Get len */
-    LoadWordDisp(cu, rl_array.low_reg, len_offset, reg_len);
+    LoadWordDisp(rl_array.low_reg, len_offset, reg_len);
   }
   if (rl_dest.wide || rl_dest.fp || constant_index) {
     int reg_ptr;
@@ -828,50 +824,50 @@
       reg_ptr = rl_array.low_reg;  // NOTE: must not alter reg_ptr in constant case.
     } else {
       // No special indexed operation, lea + load w/ displacement
-      reg_ptr = AllocTemp(cu);
-      OpRegRegRegShift(cu, kOpAdd, reg_ptr, rl_array.low_reg, rl_index.low_reg,
+      reg_ptr = AllocTemp();
+      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.low_reg, rl_index.low_reg,
                        EncodeShift(kArmLsl, scale));
-      FreeTemp(cu, rl_index.low_reg);
+      FreeTemp(rl_index.low_reg);
     }
-    rl_result = EvalLoc(cu, rl_dest, reg_class, true);
+    rl_result = EvalLoc(rl_dest, reg_class, true);
 
     if (needs_range_check) {
       if (constant_index) {
-        GenImmedCheck(cu, kCondLs, reg_len, cu->mir_graph->ConstantValue(rl_index), kThrowConstantArrayBounds);
+        GenImmedCheck(kCondLs, reg_len, mir_graph_->ConstantValue(rl_index), kThrowConstantArrayBounds);
       } else {
-        GenRegRegCheck(cu, kCondLs, reg_len, rl_index.low_reg, kThrowArrayBounds);
+        GenRegRegCheck(kCondLs, reg_len, rl_index.low_reg, kThrowArrayBounds);
       }
-      FreeTemp(cu, reg_len);
+      FreeTemp(reg_len);
     }
     if (rl_dest.wide) {
-      LoadBaseDispWide(cu, reg_ptr, data_offset, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
+      LoadBaseDispWide(reg_ptr, data_offset, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
       if (!constant_index) {
-        FreeTemp(cu, reg_ptr);
+        FreeTemp(reg_ptr);
       }
-      StoreValueWide(cu, rl_dest, rl_result);
+      StoreValueWide(rl_dest, rl_result);
     } else {
-      LoadBaseDisp(cu, reg_ptr, data_offset, rl_result.low_reg, size, INVALID_SREG);
+      LoadBaseDisp(reg_ptr, data_offset, rl_result.low_reg, size, INVALID_SREG);
       if (!constant_index) {
-        FreeTemp(cu, reg_ptr);
+        FreeTemp(reg_ptr);
       }
-      StoreValue(cu, rl_dest, rl_result);
+      StoreValue(rl_dest, rl_result);
     }
   } else {
     // Offset base, then use indexed load
-    int reg_ptr = AllocTemp(cu);
-    OpRegRegImm(cu, kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
-    FreeTemp(cu, rl_array.low_reg);
-    rl_result = EvalLoc(cu, rl_dest, reg_class, true);
+    int reg_ptr = AllocTemp();
+    OpRegRegImm(kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
+    FreeTemp(rl_array.low_reg);
+    rl_result = EvalLoc(rl_dest, reg_class, true);
 
     if (needs_range_check) {
       // TODO: change kCondCS to a more meaningful name; is the sense of
       // carry-set/clear flipped?
-      GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
-      FreeTemp(cu, reg_len);
+      GenRegRegCheck(kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+      FreeTemp(reg_len);
     }
-    LoadBaseIndexed(cu, reg_ptr, rl_index.low_reg, rl_result.low_reg, scale, size);
-    FreeTemp(cu, reg_ptr);
-    StoreValue(cu, rl_dest, rl_result);
+    LoadBaseIndexed(reg_ptr, rl_index.low_reg, rl_result.low_reg, scale, size);
+    FreeTemp(reg_ptr);
+    StoreValue(rl_dest, rl_result);
   }
 }
 
@@ -879,7 +875,7 @@
  * Generate array store
  *
  */
-void ArmCodegen::GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                           RegLocation rl_index, RegLocation rl_src, int scale)
 {
   RegisterClass reg_class = oat_reg_class_by_size(size);
@@ -895,73 +891,73 @@
 
   // If index is constant, just fold it into the data offset.
   if (constant_index) {
-    data_offset += cu->mir_graph->ConstantValue(rl_index) << scale;
+    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
   }
 
-  rl_array = LoadValue(cu, rl_array, kCoreReg);
+  rl_array = LoadValue(rl_array, kCoreReg);
   if (!constant_index) {
-    rl_index = LoadValue(cu, rl_index, kCoreReg);
+    rl_index = LoadValue(rl_index, kCoreReg);
   }
 
   int reg_ptr;
   if (constant_index) {
     reg_ptr = rl_array.low_reg;
-  } else if (IsTemp(cu, rl_array.low_reg)) {
-    Clobber(cu, rl_array.low_reg);
+  } else if (IsTemp(rl_array.low_reg)) {
+    Clobber(rl_array.low_reg);
     reg_ptr = rl_array.low_reg;
   } else {
-    reg_ptr = AllocTemp(cu);
+    reg_ptr = AllocTemp();
   }
 
   /* null object? */
-  GenNullCheck(cu, rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+  GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
 
   bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
   int reg_len = INVALID_REG;
   if (needs_range_check) {
-    reg_len = AllocTemp(cu);
+    reg_len = AllocTemp();
     // NOTE: max live temps (4) here.
     /* Get len */
-    LoadWordDisp(cu, rl_array.low_reg, len_offset, reg_len);
+    LoadWordDisp(rl_array.low_reg, len_offset, reg_len);
   }
   /* at this point, reg_ptr points to array, 2 live temps */
   if (rl_src.wide || rl_src.fp || constant_index) {
     if (rl_src.wide) {
-      rl_src = LoadValueWide(cu, rl_src, reg_class);
+      rl_src = LoadValueWide(rl_src, reg_class);
     } else {
-      rl_src = LoadValue(cu, rl_src, reg_class);
+      rl_src = LoadValue(rl_src, reg_class);
     }
     if (!constant_index) {
-      OpRegRegRegShift(cu, kOpAdd, reg_ptr, rl_array.low_reg, rl_index.low_reg,
+      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.low_reg, rl_index.low_reg,
                        EncodeShift(kArmLsl, scale));
     }
     if (needs_range_check) {
       if (constant_index) {
-        GenImmedCheck(cu, kCondLs, reg_len, cu->mir_graph->ConstantValue(rl_index), kThrowConstantArrayBounds);
+        GenImmedCheck(kCondLs, reg_len, mir_graph_->ConstantValue(rl_index), kThrowConstantArrayBounds);
       } else {
-        GenRegRegCheck(cu, kCondLs, reg_len, rl_index.low_reg, kThrowArrayBounds);
+        GenRegRegCheck(kCondLs, reg_len, rl_index.low_reg, kThrowArrayBounds);
       }
-      FreeTemp(cu, reg_len);
+      FreeTemp(reg_len);
     }
 
     if (rl_src.wide) {
-      StoreBaseDispWide(cu, reg_ptr, data_offset, rl_src.low_reg, rl_src.high_reg);
+      StoreBaseDispWide(reg_ptr, data_offset, rl_src.low_reg, rl_src.high_reg);
     } else {
-      StoreBaseDisp(cu, reg_ptr, data_offset, rl_src.low_reg, size);
+      StoreBaseDisp(reg_ptr, data_offset, rl_src.low_reg, size);
     }
   } else {
     /* reg_ptr -> array data */
-    OpRegRegImm(cu, kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
-    rl_src = LoadValue(cu, rl_src, reg_class);
+    OpRegRegImm(kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
+    rl_src = LoadValue(rl_src, reg_class);
     if (needs_range_check) {
-      GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
-      FreeTemp(cu, reg_len);
+      GenRegRegCheck(kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+      FreeTemp(reg_len);
     }
-    StoreBaseIndexed(cu, reg_ptr, rl_index.low_reg, rl_src.low_reg,
+    StoreBaseIndexed(reg_ptr, rl_index.low_reg, rl_src.low_reg,
                      scale, size);
   }
   if (!constant_index) {
-    FreeTemp(cu, reg_ptr);
+    FreeTemp(reg_ptr);
   }
 }
 
@@ -969,144 +965,144 @@
  * Generate array store
  *
  */
-void ArmCodegen::GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array,
+void ArmMir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array,
                              RegLocation rl_index, RegLocation rl_src, int scale)
 {
   int len_offset = mirror::Array::LengthOffset().Int32Value();
   int data_offset = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value();
 
-  FlushAllRegs(cu);  // Use explicit registers
-  LockCallTemps(cu);
+  FlushAllRegs();  // Use explicit registers
+  LockCallTemps();
 
   int r_value = TargetReg(kArg0);  // Register holding value
   int r_array_class = TargetReg(kArg1);  // Register holding array's Class
   int r_array = TargetReg(kArg2);  // Register holding array
   int r_index = TargetReg(kArg3);  // Register holding index into array
 
-  LoadValueDirectFixed(cu, rl_array, r_array);  // Grab array
-  LoadValueDirectFixed(cu, rl_src, r_value);  // Grab value
-  LoadValueDirectFixed(cu, rl_index, r_index);  // Grab index
+  LoadValueDirectFixed(rl_array, r_array);  // Grab array
+  LoadValueDirectFixed(rl_src, r_value);  // Grab value
+  LoadValueDirectFixed(rl_index, r_index);  // Grab index
 
-  GenNullCheck(cu, rl_array.s_reg_low, r_array, opt_flags);  // NPE?
+  GenNullCheck(rl_array.s_reg_low, r_array, opt_flags);  // NPE?
 
   // Store of null?
-  LIR* null_value_check = OpCmpImmBranch(cu, kCondEq, r_value, 0, NULL);
+  LIR* null_value_check = OpCmpImmBranch(kCondEq, r_value, 0, NULL);
 
   // Get the array's class.
-  LoadWordDisp(cu, r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
-  CallRuntimeHelperRegReg(cu, ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
+  LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
+  CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
                           r_array_class, true);
   // Redo LoadValues in case they didn't survive the call.
-  LoadValueDirectFixed(cu, rl_array, r_array);  // Reload array
-  LoadValueDirectFixed(cu, rl_index, r_index);  // Reload index
-  LoadValueDirectFixed(cu, rl_src, r_value);  // Reload value
+  LoadValueDirectFixed(rl_array, r_array);  // Reload array
+  LoadValueDirectFixed(rl_index, r_index);  // Reload index
+  LoadValueDirectFixed(rl_src, r_value);  // Reload value
   r_array_class = INVALID_REG;
 
   // Branch here if value to be stored == null
-  LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+  LIR* target = NewLIR0(kPseudoTargetLabel);
   null_value_check->target = target;
 
   bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
   int reg_len = INVALID_REG;
   if (needs_range_check) {
     reg_len = TargetReg(kArg1);
-    LoadWordDisp(cu, r_array, len_offset, reg_len);  // Get len
+    LoadWordDisp(r_array, len_offset, reg_len);  // Get len
   }
   /* r_ptr -> array data */
-  int r_ptr = AllocTemp(cu);
-  OpRegRegImm(cu, kOpAdd, r_ptr, r_array, data_offset);
+  int r_ptr = AllocTemp();
+  OpRegRegImm(kOpAdd, r_ptr, r_array, data_offset);
   if (needs_range_check) {
-    GenRegRegCheck(cu, kCondCs, r_index, reg_len, kThrowArrayBounds);
+    GenRegRegCheck(kCondCs, r_index, reg_len, kThrowArrayBounds);
   }
-  StoreBaseIndexed(cu, r_ptr, r_index, r_value, scale, kWord);
-  FreeTemp(cu, r_ptr);
-  FreeTemp(cu, r_index);
-  if (!cu->mir_graph->IsConstantNullRef(rl_src)) {
-    MarkGCCard(cu, r_value, r_array);
+  StoreBaseIndexed(r_ptr, r_index, r_value, scale, kWord);
+  FreeTemp(r_ptr);
+  FreeTemp(r_index);
+  if (!mir_graph_->IsConstantNullRef(rl_src)) {
+    MarkGCCard(r_value, r_array);
   }
 }
 
-void ArmCodegen::GenShiftImmOpLong(CompilationUnit* cu, Instruction::Code opcode,
+void ArmMir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
                                    RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift)
 {
-  rl_src = LoadValueWide(cu, rl_src, kCoreReg);
+  rl_src = LoadValueWide(rl_src, kCoreReg);
   // Per spec, we only care about low 6 bits of shift amount.
-  int shift_amount = cu->mir_graph->ConstantValue(rl_shift) & 0x3f;
+  int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
   if (shift_amount == 0) {
-    StoreValueWide(cu, rl_dest, rl_src);
+    StoreValueWide(rl_dest, rl_src);
     return;
   }
-  if (BadOverlap(cu, rl_src, rl_dest)) {
-    GenShiftOpLong(cu, opcode, rl_dest, rl_src, rl_shift);
+  if (BadOverlap(rl_src, rl_dest)) {
+    GenShiftOpLong(opcode, rl_dest, rl_src, rl_shift);
     return;
   }
-  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   switch (opcode) {
     case Instruction::SHL_LONG:
     case Instruction::SHL_LONG_2ADDR:
       if (shift_amount == 1) {
-        OpRegRegReg(cu, kOpAdd, rl_result.low_reg, rl_src.low_reg, rl_src.low_reg);
-        OpRegRegReg(cu, kOpAdc, rl_result.high_reg, rl_src.high_reg, rl_src.high_reg);
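+        // Shift left by one as add/adc: the carry out of the low-word add
+        // becomes the bit shifted into the high word.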
+        OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, rl_src.low_reg);
+        OpRegRegReg(kOpAdc, rl_result.high_reg, rl_src.high_reg, rl_src.high_reg);
       } else if (shift_amount == 32) {
-        OpRegCopy(cu, rl_result.high_reg, rl_src.low_reg);
-        LoadConstant(cu, rl_result.low_reg, 0);
+        OpRegCopy(rl_result.high_reg, rl_src.low_reg);
+        LoadConstant(rl_result.low_reg, 0);
       } else if (shift_amount > 31) {
-        OpRegRegImm(cu, kOpLsl, rl_result.high_reg, rl_src.low_reg, shift_amount - 32);
-        LoadConstant(cu, rl_result.low_reg, 0);
+        OpRegRegImm(kOpLsl, rl_result.high_reg, rl_src.low_reg, shift_amount - 32);
+        LoadConstant(rl_result.low_reg, 0);
       } else {
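+        // General case: high = (high << n) | (low >> (32 - n)); low <<= n.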
-        OpRegRegImm(cu, kOpLsl, rl_result.high_reg, rl_src.high_reg, shift_amount);
-        OpRegRegRegShift(cu, kOpOr, rl_result.high_reg, rl_result.high_reg, rl_src.low_reg,
+        OpRegRegImm(kOpLsl, rl_result.high_reg, rl_src.high_reg, shift_amount);
+        OpRegRegRegShift(kOpOr, rl_result.high_reg, rl_result.high_reg, rl_src.low_reg,
                          EncodeShift(kArmLsr, 32 - shift_amount));
-        OpRegRegImm(cu, kOpLsl, rl_result.low_reg, rl_src.low_reg, shift_amount);
+        OpRegRegImm(kOpLsl, rl_result.low_reg, rl_src.low_reg, shift_amount);
       }
       break;
     case Instruction::SHR_LONG:
     case Instruction::SHR_LONG_2ADDR:
       if (shift_amount == 32) {
-        OpRegCopy(cu, rl_result.low_reg, rl_src.high_reg);
-        OpRegRegImm(cu, kOpAsr, rl_result.high_reg, rl_src.high_reg, 31);
+        OpRegCopy(rl_result.low_reg, rl_src.high_reg);
+        OpRegRegImm(kOpAsr, rl_result.high_reg, rl_src.high_reg, 31);
       } else if (shift_amount > 31) {
-        OpRegRegImm(cu, kOpAsr, rl_result.low_reg, rl_src.high_reg, shift_amount - 32);
-        OpRegRegImm(cu, kOpAsr, rl_result.high_reg, rl_src.high_reg, 31);
+        OpRegRegImm(kOpAsr, rl_result.low_reg, rl_src.high_reg, shift_amount - 32);
+        OpRegRegImm(kOpAsr, rl_result.high_reg, rl_src.high_reg, 31);
       } else {
-        int t_reg = AllocTemp(cu);
-        OpRegRegImm(cu, kOpLsr, t_reg, rl_src.low_reg, shift_amount);
-        OpRegRegRegShift(cu, kOpOr, rl_result.low_reg, t_reg, rl_src.high_reg,
+        int t_reg = AllocTemp();
+        OpRegRegImm(kOpLsr, t_reg, rl_src.low_reg, shift_amount);
+        OpRegRegRegShift(kOpOr, rl_result.low_reg, t_reg, rl_src.high_reg,
                          EncodeShift(kArmLsl, 32 - shift_amount));
-        FreeTemp(cu, t_reg);
-        OpRegRegImm(cu, kOpAsr, rl_result.high_reg, rl_src.high_reg, shift_amount);
+        FreeTemp(t_reg);
+        OpRegRegImm(kOpAsr, rl_result.high_reg, rl_src.high_reg, shift_amount);
       }
       break;
     case Instruction::USHR_LONG:
     case Instruction::USHR_LONG_2ADDR:
       if (shift_amount == 32) {
-        OpRegCopy(cu, rl_result.low_reg, rl_src.high_reg);
-        LoadConstant(cu, rl_result.high_reg, 0);
+        OpRegCopy(rl_result.low_reg, rl_src.high_reg);
+        LoadConstant(rl_result.high_reg, 0);
       } else if (shift_amount > 31) {
-        OpRegRegImm(cu, kOpLsr, rl_result.low_reg, rl_src.high_reg, shift_amount - 32);
-        LoadConstant(cu, rl_result.high_reg, 0);
+        OpRegRegImm(kOpLsr, rl_result.low_reg, rl_src.high_reg, shift_amount - 32);
+        LoadConstant(rl_result.high_reg, 0);
       } else {
-        int t_reg = AllocTemp(cu);
-        OpRegRegImm(cu, kOpLsr, t_reg, rl_src.low_reg, shift_amount);
-        OpRegRegRegShift(cu, kOpOr, rl_result.low_reg, t_reg, rl_src.high_reg,
+        int t_reg = AllocTemp();
+        OpRegRegImm(kOpLsr, t_reg, rl_src.low_reg, shift_amount);
+        OpRegRegRegShift(kOpOr, rl_result.low_reg, t_reg, rl_src.high_reg,
                          EncodeShift(kArmLsl, 32 - shift_amount));
-        FreeTemp(cu, t_reg);
-        OpRegRegImm(cu, kOpLsr, rl_result.high_reg, rl_src.high_reg, shift_amount);
+        FreeTemp(t_reg);
+        OpRegRegImm(kOpLsr, rl_result.high_reg, rl_src.high_reg, shift_amount);
       }
       break;
     default:
       LOG(FATAL) << "Unexpected case";
   }
-  StoreValueWide(cu, rl_dest, rl_result);
+  StoreValueWide(rl_dest, rl_result);
 }
 
-void ArmCodegen::GenArithImmOpLong(CompilationUnit* cu, Instruction::Code opcode,
+void ArmMir2Lir::GenArithImmOpLong(Instruction::Code opcode,
                                    RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
 {
   if ((opcode == Instruction::SUB_LONG_2ADDR) || (opcode == Instruction::SUB_LONG)) {
     if (!rl_src2.is_const) {
       // Don't bother with special handling for subtract from immediate.
-      GenArithOpLong(cu, opcode, rl_dest, rl_src1, rl_src2);
+      GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
       return;
     }
   } else {
@@ -1118,12 +1114,12 @@
       rl_src2 = rl_temp;
     }
   }
-  if (BadOverlap(cu, rl_src1, rl_dest)) {
-    GenArithOpLong(cu, opcode, rl_dest, rl_src1, rl_src2);
+  if (BadOverlap(rl_src1, rl_dest)) {
+    GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
     return;
   }
   DCHECK(rl_src2.is_const);
-  int64_t val = cu->mir_graph->ConstantValueWide(rl_src2);
+  int64_t val = mir_graph_->ConstantValueWide(rl_src2);
   uint32_t val_lo = Low32Bits(val);
   uint32_t val_hi = High32Bits(val);
   int32_t mod_imm_lo = ModifiedImmediate(val_lo);
@@ -1136,54 +1132,54 @@
     case Instruction::SUB_LONG:
     case Instruction::SUB_LONG_2ADDR:
       if ((mod_imm_lo < 0) || (mod_imm_hi < 0)) {
-        GenArithOpLong(cu, opcode, rl_dest, rl_src1, rl_src2);
+        GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
         return;
       }
       break;
     default:
       break;
   }
-  rl_src1 = LoadValueWide(cu, rl_src1, kCoreReg);
-  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   // NOTE: once we've done the EvalLoc on dest, we can no longer bail.
   switch (opcode) {
     case Instruction::ADD_LONG:
     case Instruction::ADD_LONG_2ADDR:
-      NewLIR3(cu, kThumb2AddRRI8, rl_result.low_reg, rl_src1.low_reg, mod_imm_lo);
-      NewLIR3(cu, kThumb2AdcRRI8, rl_result.high_reg, rl_src1.high_reg, mod_imm_hi);
+      NewLIR3(kThumb2AddRRI8, rl_result.low_reg, rl_src1.low_reg, mod_imm_lo);
+      NewLIR3(kThumb2AdcRRI8, rl_result.high_reg, rl_src1.high_reg, mod_imm_hi);
       break;
     case Instruction::OR_LONG:
     case Instruction::OR_LONG_2ADDR:
       if ((val_lo != 0) || (rl_result.low_reg != rl_src1.low_reg)) {
-        OpRegRegImm(cu, kOpOr, rl_result.low_reg, rl_src1.low_reg, val_lo);
+        OpRegRegImm(kOpOr, rl_result.low_reg, rl_src1.low_reg, val_lo);
       }
       if ((val_hi != 0) || (rl_result.high_reg != rl_src1.high_reg)) {
-        OpRegRegImm(cu, kOpOr, rl_result.high_reg, rl_src1.high_reg, val_hi);
+        OpRegRegImm(kOpOr, rl_result.high_reg, rl_src1.high_reg, val_hi);
       }
       break;
     case Instruction::XOR_LONG:
     case Instruction::XOR_LONG_2ADDR:
-      OpRegRegImm(cu, kOpXor, rl_result.low_reg, rl_src1.low_reg, val_lo);
-      OpRegRegImm(cu, kOpXor, rl_result.high_reg, rl_src1.high_reg, val_hi);
+      OpRegRegImm(kOpXor, rl_result.low_reg, rl_src1.low_reg, val_lo);
+      OpRegRegImm(kOpXor, rl_result.high_reg, rl_src1.high_reg, val_hi);
       break;
     case Instruction::AND_LONG:
     case Instruction::AND_LONG_2ADDR:
       if ((val_lo != 0xffffffff) || (rl_result.low_reg != rl_src1.low_reg)) {
-        OpRegRegImm(cu, kOpAnd, rl_result.low_reg, rl_src1.low_reg, val_lo);
+        OpRegRegImm(kOpAnd, rl_result.low_reg, rl_src1.low_reg, val_lo);
       }
       if ((val_hi != 0xffffffff) || (rl_result.high_reg != rl_src1.high_reg)) {
-        OpRegRegImm(cu, kOpAnd, rl_result.high_reg, rl_src1.high_reg, val_hi);
+        OpRegRegImm(kOpAnd, rl_result.high_reg, rl_src1.high_reg, val_hi);
       }
       break;
     case Instruction::SUB_LONG_2ADDR:
     case Instruction::SUB_LONG:
-      NewLIR3(cu, kThumb2SubRRI8, rl_result.low_reg, rl_src1.low_reg, mod_imm_lo);
-      NewLIR3(cu, kThumb2SbcRRI8, rl_result.high_reg, rl_src1.high_reg, mod_imm_hi);
+      NewLIR3(kThumb2SubRRI8, rl_result.low_reg, rl_src1.low_reg, mod_imm_lo);
+      NewLIR3(kThumb2SbcRRI8, rl_result.high_reg, rl_src1.high_reg, mod_imm_hi);
       break;
     default:
       LOG(FATAL) << "Unexpected opcode " << opcode;
   }
-  StoreValueWide(cu, rl_dest, rl_result);
+  StoreValueWide(rl_dest, rl_result);
 }
 
 }  // namespace art
diff --git a/src/compiler/dex/quick/arm/target_arm.cc b/src/compiler/dex/quick/arm/target_arm.cc
index 6d8102f..43bbb69 100644
--- a/src/compiler/dex/quick/arm/target_arm.cc
+++ b/src/compiler/dex/quick/arm/target_arm.cc
@@ -18,9 +18,7 @@
 
 #include "arm_lir.h"
 #include "codegen_arm.h"
-#include "compiler/dex/quick/codegen_util.h"
 #include "compiler/dex/compiler_internals.h"
-#include "compiler/dex/quick/ralloc_util.h"
 
 namespace art {
 
@@ -35,32 +33,32 @@
 static int fp_temps[] = {fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
                         fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15};
 
-RegLocation ArmCodegen::LocCReturn()
+RegLocation ArmMir2Lir::LocCReturn()
 {
   RegLocation res = ARM_LOC_C_RETURN;
   return res;
 }
 
-RegLocation ArmCodegen::LocCReturnWide()
+RegLocation ArmMir2Lir::LocCReturnWide()
 {
   RegLocation res = ARM_LOC_C_RETURN_WIDE;
   return res;
 }
 
-RegLocation ArmCodegen::LocCReturnFloat()
+RegLocation ArmMir2Lir::LocCReturnFloat()
 {
   RegLocation res = ARM_LOC_C_RETURN_FLOAT;
   return res;
 }
 
-RegLocation ArmCodegen::LocCReturnDouble()
+RegLocation ArmMir2Lir::LocCReturnDouble()
 {
   RegLocation res = ARM_LOC_C_RETURN_DOUBLE;
   return res;
 }
 
 // Return a target-dependent special register.
-int ArmCodegen::TargetReg(SpecialTargetRegister reg) {
+int ArmMir2Lir::TargetReg(SpecialTargetRegister reg) {
   int res = INVALID_REG;
   switch (reg) {
     case kSelf: res = rARM_SELF; break;
@@ -86,19 +84,19 @@
 
 
 // Create a double from a pair of singles.
-int ArmCodegen::S2d(int low_reg, int high_reg)
+int ArmMir2Lir::S2d(int low_reg, int high_reg)
 {
   return ARM_S2D(low_reg, high_reg);
 }
 
 // Return mask to strip off fp reg flags and bias.
-uint32_t ArmCodegen::FpRegMask()
+uint32_t ArmMir2Lir::FpRegMask()
 {
   return ARM_FP_REG_MASK;
 }
 
 // True if both regs single, both core or both double.
-bool ArmCodegen::SameRegType(int reg1, int reg2)
+bool ArmMir2Lir::SameRegType(int reg1, int reg2)
 {
   return (ARM_REGTYPE(reg1) == ARM_REGTYPE(reg2));
 }
@@ -106,7 +104,7 @@
 /*
  * Decode the register id.
  */
-uint64_t ArmCodegen::GetRegMaskCommon(CompilationUnit* cu, int reg)
+uint64_t ArmMir2Lir::GetRegMaskCommon(int reg)
 {
   uint64_t seed;
   int shift;
@@ -123,17 +121,17 @@
   return (seed << shift);
 }
 
-uint64_t ArmCodegen::GetPCUseDefEncoding()
+uint64_t ArmMir2Lir::GetPCUseDefEncoding()
 {
   return ENCODE_ARM_REG_PC;
 }
 
-void ArmCodegen::SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir)
+void ArmMir2Lir::SetupTargetResourceMasks(LIR* lir)
 {
-  DCHECK_EQ(cu->instruction_set, kThumb2);
+  DCHECK_EQ(cu_->instruction_set, kThumb2);
 
   // Thumb2 specific setup
-  uint64_t flags = ArmCodegen::EncodingMap[lir->opcode].flags;
+  uint64_t flags = ArmMir2Lir::EncodingMap[lir->opcode].flags;
   int opcode = lir->opcode;
 
   if (flags & REG_DEF_SP) {
@@ -158,7 +156,7 @@
 
   if (flags & REG_DEF_FPCS_LIST2) {
     for (int i = 0; i < lir->operands[2]; i++) {
-      SetupRegMask(cu, &lir->def_mask, lir->operands[1] + i);
+      SetupRegMask(&lir->def_mask, lir->operands[1] + i);
     }
   }
 
@@ -185,12 +183,12 @@
 
   if (flags & REG_USE_FPCS_LIST2) {
     for (int i = 0; i < lir->operands[2]; i++) {
-      SetupRegMask(cu, &lir->use_mask, lir->operands[1] + i);
+      SetupRegMask(&lir->use_mask, lir->operands[1] + i);
     }
   }
   /* Fixup for kThumbPush/lr and kThumbPop/pc */
   if (opcode == kThumbPush || opcode == kThumbPop) {
-    uint64_t r8Mask = GetRegMaskCommon(cu, r8);
+    uint64_t r8Mask = GetRegMaskCommon(r8);
     if ((opcode == kThumbPush) && (lir->use_mask & r8Mask)) {
       lir->use_mask &= ~r8Mask;
       lir->use_mask |= ENCODE_ARM_REG_LR;
@@ -204,7 +202,7 @@
   }
 }
 
-ArmConditionCode ArmCodegen::ArmConditionEncoding(ConditionCode ccode)
+ArmConditionCode ArmMir2Lir::ArmConditionEncoding(ConditionCode ccode)
 {
   ArmConditionCode res;
   switch (ccode) {
@@ -317,7 +315,7 @@
  * Interpret a format string and build a string no longer than size
  * See format key in Assemble.c.
  */
-std::string ArmCodegen::BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr)
+std::string ArmMir2Lir::BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr)
 {
   std::string buf;
   int i;
@@ -456,7 +454,7 @@
   return buf;
 }
 
-void ArmCodegen::DumpResourceMask(LIR* arm_lir, uint64_t mask, const char* prefix)
+void ArmMir2Lir::DumpResourceMask(LIR* arm_lir, uint64_t mask, const char* prefix)
 {
   char buf[256];
   buf[0] = 0;
@@ -502,98 +500,98 @@
   }
 }
 
-bool ArmCodegen::IsUnconditionalBranch(LIR* lir)
+bool ArmMir2Lir::IsUnconditionalBranch(LIR* lir)
 {
   return ((lir->opcode == kThumbBUncond) || (lir->opcode == kThumb2BUncond));
 }
 
-bool InitArmCodegen(CompilationUnit* cu)
-{
-  cu->cg.reset(new ArmCodegen());
+ArmMir2Lir::ArmMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph) : Mir2Lir(cu, mir_graph) {
+  // Sanity check - make sure encoding map lines up.
   for (int i = 0; i < kArmLast; i++) {
-    if (ArmCodegen::EncodingMap[i].opcode != i) {
-      LOG(FATAL) << "Encoding order for " << ArmCodegen::EncodingMap[i].name
+    if (ArmMir2Lir::EncodingMap[i].opcode != i) {
+      LOG(FATAL) << "Encoding order for " << ArmMir2Lir::EncodingMap[i].name
                  << " is wrong: expecting " << i << ", seeing "
-                 << static_cast<int>(ArmCodegen::EncodingMap[i].opcode);
+                 << static_cast<int>(ArmMir2Lir::EncodingMap[i].opcode);
     }
   }
-  return true;
+}
+
+Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph) {
+  return new ArmMir2Lir(cu, mir_graph);
 }
 
 /*
  * Alloc a pair of core registers, or a double.  Low reg in low byte,
  * high reg in next byte.
  */
-int ArmCodegen::AllocTypedTempPair(CompilationUnit* cu, bool fp_hint, int reg_class)
+int ArmMir2Lir::AllocTypedTempPair(bool fp_hint, int reg_class)
 {
   int high_reg;
   int low_reg;
   int res = 0;
 
   if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
-    low_reg = AllocTempDouble(cu);
+    low_reg = AllocTempDouble();
     high_reg = low_reg + 1;
   } else {
-    low_reg = AllocTemp(cu);
-    high_reg = AllocTemp(cu);
+    low_reg = AllocTemp();
+    high_reg = AllocTemp();
   }
   res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
   return res;
 }
 
-int ArmCodegen::AllocTypedTemp(CompilationUnit* cu, bool fp_hint, int reg_class)
+int ArmMir2Lir::AllocTypedTemp(bool fp_hint, int reg_class)
 {
   if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg))
-    return AllocTempFloat(cu);
-  return AllocTemp(cu);
+    return AllocTempFloat();
+  return AllocTemp();
 }
 
-void ArmCodegen::CompilerInitializeRegAlloc(CompilationUnit* cu)
+void ArmMir2Lir::CompilerInitializeRegAlloc()
 {
   int num_regs = sizeof(core_regs)/sizeof(*core_regs);
   int num_reserved = sizeof(ReservedRegs)/sizeof(*ReservedRegs);
   int num_temps = sizeof(core_temps)/sizeof(*core_temps);
   int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
   int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
-  RegisterPool *pool =
-      static_cast<RegisterPool*>(NewMem(cu, sizeof(*pool), true, kAllocRegAlloc));
-  cu->reg_pool = pool;
-  pool->num_core_regs = num_regs;
-  pool->core_regs = reinterpret_cast<RegisterInfo*>
-      (NewMem(cu, num_regs * sizeof(*cu->reg_pool->core_regs), true, kAllocRegAlloc));
-  pool->num_fp_regs = num_fp_regs;
-  pool->FPRegs = static_cast<RegisterInfo*>
-      (NewMem(cu, num_fp_regs * sizeof(*cu->reg_pool->FPRegs), true, kAllocRegAlloc));
-  CompilerInitPool(pool->core_regs, core_regs, pool->num_core_regs);
-  CompilerInitPool(pool->FPRegs, FpRegs, pool->num_fp_regs);
+  reg_pool_ = static_cast<RegisterPool*>(NewMem(cu_, sizeof(*reg_pool_), true, kAllocRegAlloc));
+  reg_pool_->num_core_regs = num_regs;
+  reg_pool_->core_regs = reinterpret_cast<RegisterInfo*>
+      (NewMem(cu_, num_regs * sizeof(*reg_pool_->core_regs), true, kAllocRegAlloc));
+  reg_pool_->num_fp_regs = num_fp_regs;
+  reg_pool_->FPRegs = static_cast<RegisterInfo*>
+      (NewMem(cu_, num_fp_regs * sizeof(*reg_pool_->FPRegs), true, kAllocRegAlloc));
+  CompilerInitPool(reg_pool_->core_regs, core_regs, reg_pool_->num_core_regs);
+  CompilerInitPool(reg_pool_->FPRegs, FpRegs, reg_pool_->num_fp_regs);
   // Keep special registers from being allocated
   for (int i = 0; i < num_reserved; i++) {
     if (NO_SUSPEND && (ReservedRegs[i] == rARM_SUSPEND)) {
       // To measure cost of suspend check
       continue;
     }
-    MarkInUse(cu, ReservedRegs[i]);
+    MarkInUse(ReservedRegs[i]);
   }
   // Mark temp regs - all others not in use can be used for promotion
   for (int i = 0; i < num_temps; i++) {
-    MarkTemp(cu, core_temps[i]);
+    MarkTemp(core_temps[i]);
   }
   for (int i = 0; i < num_fp_temps; i++) {
-    MarkTemp(cu, fp_temps[i]);
+    MarkTemp(fp_temps[i]);
   }
 
   // Start allocation at r2 in an attempt to avoid clobbering return values
-  pool->next_core_reg = r2;
+  reg_pool_->next_core_reg = r2;
 }
 
-void ArmCodegen::FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep,
+void ArmMir2Lir::FreeRegLocTemps(RegLocation rl_keep,
                      RegLocation rl_free)
 {
   if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
     (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) {
     // No overlap, free both
-    FreeTemp(cu, rl_free.low_reg);
-    FreeTemp(cu, rl_free.high_reg);
+    FreeTemp(rl_free.low_reg);
+    FreeTemp(rl_free.high_reg);
   }
 }
 /*
@@ -602,10 +600,10 @@
  * machinery is in place, always spill lr.
  */
 
-void ArmCodegen::AdjustSpillMask(CompilationUnit* cu)
+void ArmMir2Lir::AdjustSpillMask()
 {
-  cu->core_spill_mask |= (1 << rARM_LR);
-  cu->num_core_spills++;
+  core_spill_mask_ |= (1 << rARM_LR);
+  num_core_spills_++;
 }
 
 /*
@@ -614,26 +612,26 @@
  * include any holes in the mask.  Associate holes with
  * Dalvik register INVALID_VREG (0xFFFFU).
  */
-void ArmCodegen::MarkPreservedSingle(CompilationUnit* cu, int v_reg, int reg)
+void ArmMir2Lir::MarkPreservedSingle(int v_reg, int reg)
 {
   DCHECK_GE(reg, ARM_FP_REG_MASK + ARM_FP_CALLEE_SAVE_BASE);
   reg = (reg & ARM_FP_REG_MASK) - ARM_FP_CALLEE_SAVE_BASE;
   // Ensure fp_vmap_table is large enough
-  int table_size = cu->fp_vmap_table.size();
+  int table_size = fp_vmap_table_.size();
   for (int i = table_size; i < (reg + 1); i++) {
-    cu->fp_vmap_table.push_back(INVALID_VREG);
+    fp_vmap_table_.push_back(INVALID_VREG);
   }
   // Add the current mapping
-  cu->fp_vmap_table[reg] = v_reg;
+  fp_vmap_table_[reg] = v_reg;
   // Size of fp_vmap_table is high-water mark, use to set mask
-  cu->num_fp_spills = cu->fp_vmap_table.size();
-  cu->fp_spill_mask = ((1 << cu->num_fp_spills) - 1) << ARM_FP_CALLEE_SAVE_BASE;
+  num_fp_spills_ = fp_vmap_table_.size();
+  fp_spill_mask_ = ((1 << num_fp_spills_) - 1) << ARM_FP_CALLEE_SAVE_BASE;
 }
 
-void ArmCodegen::FlushRegWide(CompilationUnit* cu, int reg1, int reg2)
+void ArmMir2Lir::FlushRegWide(int reg1, int reg2)
 {
-  RegisterInfo* info1 = GetRegInfo(cu, reg1);
-  RegisterInfo* info2 = GetRegInfo(cu, reg2);
+  RegisterInfo* info1 = GetRegInfo(reg1);
+  RegisterInfo* info2 = GetRegInfo(reg2);
   DCHECK(info1 && info2 && info1->pair && info2->pair &&
        (info1->partner == info2->reg) &&
        (info2->partner == info1->reg));
@@ -645,121 +643,121 @@
 
     info1->dirty = false;
     info2->dirty = false;
-    if (cu->mir_graph->SRegToVReg(info2->s_reg) <
-      cu->mir_graph->SRegToVReg(info1->s_reg))
+    if (mir_graph_->SRegToVReg(info2->s_reg) <
+      mir_graph_->SRegToVReg(info1->s_reg))
       info1 = info2;
-    int v_reg = cu->mir_graph->SRegToVReg(info1->s_reg);
-    StoreBaseDispWide(cu, rARM_SP, VRegOffset(cu, v_reg), info1->reg, info1->partner);
+    int v_reg = mir_graph_->SRegToVReg(info1->s_reg);
+    StoreBaseDispWide(rARM_SP, VRegOffset(v_reg), info1->reg, info1->partner);
   }
 }
 
-void ArmCodegen::FlushReg(CompilationUnit* cu, int reg)
+void ArmMir2Lir::FlushReg(int reg)
 {
-  RegisterInfo* info = GetRegInfo(cu, reg);
+  RegisterInfo* info = GetRegInfo(reg);
   if (info->live && info->dirty) {
     info->dirty = false;
-    int v_reg = cu->mir_graph->SRegToVReg(info->s_reg);
-    StoreBaseDisp(cu, rARM_SP, VRegOffset(cu, v_reg), reg, kWord);
+    int v_reg = mir_graph_->SRegToVReg(info->s_reg);
+    StoreBaseDisp(rARM_SP, VRegOffset(v_reg), reg, kWord);
   }
 }
 
 /* Give access to the target-dependent FP register encoding to common code */
-bool ArmCodegen::IsFpReg(int reg) {
+bool ArmMir2Lir::IsFpReg(int reg) {
   return ARM_FPREG(reg);
 }
 
 /* Clobber all regs that might be used by an external C call */
-void ArmCodegen::ClobberCalleeSave(CompilationUnit *cu)
+void ArmMir2Lir::ClobberCalleeSave()
 {
-  Clobber(cu, r0);
-  Clobber(cu, r1);
-  Clobber(cu, r2);
-  Clobber(cu, r3);
-  Clobber(cu, r12);
-  Clobber(cu, r14lr);
-  Clobber(cu, fr0);
-  Clobber(cu, fr1);
-  Clobber(cu, fr2);
-  Clobber(cu, fr3);
-  Clobber(cu, fr4);
-  Clobber(cu, fr5);
-  Clobber(cu, fr6);
-  Clobber(cu, fr7);
-  Clobber(cu, fr8);
-  Clobber(cu, fr9);
-  Clobber(cu, fr10);
-  Clobber(cu, fr11);
-  Clobber(cu, fr12);
-  Clobber(cu, fr13);
-  Clobber(cu, fr14);
-  Clobber(cu, fr15);
+  Clobber(r0);
+  Clobber(r1);
+  Clobber(r2);
+  Clobber(r3);
+  Clobber(r12);
+  Clobber(r14lr);
+  Clobber(fr0);
+  Clobber(fr1);
+  Clobber(fr2);
+  Clobber(fr3);
+  Clobber(fr4);
+  Clobber(fr5);
+  Clobber(fr6);
+  Clobber(fr7);
+  Clobber(fr8);
+  Clobber(fr9);
+  Clobber(fr10);
+  Clobber(fr11);
+  Clobber(fr12);
+  Clobber(fr13);
+  Clobber(fr14);
+  Clobber(fr15);
 }
 
-RegLocation ArmCodegen::GetReturnWideAlt(CompilationUnit* cu)
+RegLocation ArmMir2Lir::GetReturnWideAlt()
 {
   RegLocation res = LocCReturnWide();
   res.low_reg = r2;
   res.high_reg = r3;
-  Clobber(cu, r2);
-  Clobber(cu, r3);
-  MarkInUse(cu, r2);
-  MarkInUse(cu, r3);
-  MarkPair(cu, res.low_reg, res.high_reg);
+  Clobber(r2);
+  Clobber(r3);
+  MarkInUse(r2);
+  MarkInUse(r3);
+  MarkPair(res.low_reg, res.high_reg);
   return res;
 }
 
-RegLocation ArmCodegen::GetReturnAlt(CompilationUnit* cu)
+RegLocation ArmMir2Lir::GetReturnAlt()
 {
   RegLocation res = LocCReturn();
   res.low_reg = r1;
-  Clobber(cu, r1);
-  MarkInUse(cu, r1);
+  Clobber(r1);
+  MarkInUse(r1);
   return res;
 }
 
-RegisterInfo* ArmCodegen::GetRegInfo(CompilationUnit* cu, int reg)
+ArmMir2Lir::RegisterInfo* ArmMir2Lir::GetRegInfo(int reg)
 {
-  return ARM_FPREG(reg) ? &cu->reg_pool->FPRegs[reg & ARM_FP_REG_MASK]
-      : &cu->reg_pool->core_regs[reg];
+  return ARM_FPREG(reg) ? &reg_pool_->FPRegs[reg & ARM_FP_REG_MASK]
+      : &reg_pool_->core_regs[reg];
 }
 
 /* To be used when explicitly managing register use */
-void ArmCodegen::LockCallTemps(CompilationUnit* cu)
+void ArmMir2Lir::LockCallTemps()
 {
-  LockTemp(cu, r0);
-  LockTemp(cu, r1);
-  LockTemp(cu, r2);
-  LockTemp(cu, r3);
+  LockTemp(r0);
+  LockTemp(r1);
+  LockTemp(r2);
+  LockTemp(r3);
 }
 
 /* To be used when explicitly managing register use */
-void ArmCodegen::FreeCallTemps(CompilationUnit* cu)
+void ArmMir2Lir::FreeCallTemps()
 {
-  FreeTemp(cu, r0);
-  FreeTemp(cu, r1);
-  FreeTemp(cu, r2);
-  FreeTemp(cu, r3);
+  FreeTemp(r0);
+  FreeTemp(r1);
+  FreeTemp(r2);
+  FreeTemp(r3);
 }
 
-int ArmCodegen::LoadHelper(CompilationUnit* cu, int offset)
+int ArmMir2Lir::LoadHelper(int offset)
 {
-  LoadWordDisp(cu, rARM_SELF, offset, rARM_LR);
+  LoadWordDisp(rARM_SELF, offset, rARM_LR);
   return rARM_LR;
 }
 
-uint64_t ArmCodegen::GetTargetInstFlags(int opcode)
+uint64_t ArmMir2Lir::GetTargetInstFlags(int opcode)
 {
-  return ArmCodegen::EncodingMap[opcode].flags;
+  return ArmMir2Lir::EncodingMap[opcode].flags;
 }
 
-const char* ArmCodegen::GetTargetInstName(int opcode)
+const char* ArmMir2Lir::GetTargetInstName(int opcode)
 {
-  return ArmCodegen::EncodingMap[opcode].name;
+  return ArmMir2Lir::EncodingMap[opcode].name;
 }
 
-const char* ArmCodegen::GetTargetInstFmt(int opcode)
+const char* ArmMir2Lir::GetTargetInstFmt(int opcode)
 {
-  return ArmCodegen::EncodingMap[opcode].fmt;
+  return ArmMir2Lir::EncodingMap[opcode].fmt;
 }
 
 }  // namespace art
diff --git a/src/compiler/dex/quick/arm/utility_arm.cc b/src/compiler/dex/quick/arm/utility_arm.cc
index 33e8210..c689f72 100644
--- a/src/compiler/dex/quick/arm/utility_arm.cc
+++ b/src/compiler/dex/quick/arm/utility_arm.cc
@@ -16,8 +16,7 @@
 
 #include "arm_lir.h"
 #include "codegen_arm.h"
-#include "compiler/dex/quick/codegen_util.h"
-#include "compiler/dex/quick/ralloc_util.h"
+#include "compiler/dex/quick/mir_to_lir.h"
 
 namespace art {
 
@@ -71,31 +70,31 @@
   return res;
 }
 
-static LIR* LoadFPConstantValue(CompilationUnit* cu, int r_dest, int value)
+LIR* ArmMir2Lir::LoadFPConstantValue(int r_dest, int value)
 {
   DCHECK(ARM_SINGLEREG(r_dest));
   if (value == 0) {
     // TODO: we need better info about the target CPU.  A vector exclusive or
     //       would probably be better here if we could rely on its existence.
     // Load an immediate +2.0 (which encodes to 0)
-    NewLIR2(cu, kThumb2Vmovs_IMM8, r_dest, 0);
+    NewLIR2(kThumb2Vmovs_IMM8, r_dest, 0);
     // +0.0 = +2.0 - +2.0
-    return NewLIR3(cu, kThumb2Vsubs, r_dest, r_dest, r_dest);
+    return NewLIR3(kThumb2Vsubs, r_dest, r_dest, r_dest);
   } else {
     int encoded_imm = EncodeImmSingle(value);
     if (encoded_imm >= 0) {
-      return NewLIR2(cu, kThumb2Vmovs_IMM8, r_dest, encoded_imm);
+      return NewLIR2(kThumb2Vmovs_IMM8, r_dest, encoded_imm);
     }
   }
-  LIR* data_target = ScanLiteralPool(cu->literal_list, value, 0);
+  LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
   if (data_target == NULL) {
-    data_target = AddWordData(cu, &cu->literal_list, value);
+    data_target = AddWordData(&literal_list_, value);
   }
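+  // No encodable immediate form: pull the value from the literal pool with a
+  // pc-relative vldr whose offset is patched during assembly.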
-  LIR* load_pc_rel = RawLIR(cu, cu->current_dalvik_offset, kThumb2Vldrs,
+  LIR* load_pc_rel = RawLIR(current_dalvik_offset_, kThumb2Vldrs,
                           r_dest, r15pc, 0, 0, 0, data_target);
-  SetMemRefType(cu, load_pc_rel, true, kLiteral);
+  SetMemRefType(load_pc_rel, true, kLiteral);
   load_pc_rel->alias_info = reinterpret_cast<uintptr_t>(data_target);
-  AppendLIR(cu, load_pc_rel);
+  AppendLIR(load_pc_rel);
   return load_pc_rel;
 }
 
@@ -122,7 +121,7 @@
  * Determine whether value can be encoded as a Thumb2 modified
  * immediate.  If not, return -1.  If so, return i:imm3:a:bcdefgh form.
  */
-int ArmCodegen::ModifiedImmediate(uint32_t value)
+int ArmMir2Lir::ModifiedImmediate(uint32_t value)
 {
    int z_leading;
    int z_trailing;
@@ -152,22 +151,22 @@
    return value | ((0x8 + z_leading) << 7); /* [01000..11111]:bcdefgh */
 }
 
-bool ArmCodegen::InexpensiveConstantInt(int32_t value)
+bool ArmMir2Lir::InexpensiveConstantInt(int32_t value)
 {
   return (ModifiedImmediate(value) >= 0) || (ModifiedImmediate(~value) >= 0);
 }
 
-bool ArmCodegen::InexpensiveConstantFloat(int32_t value)
+bool ArmMir2Lir::InexpensiveConstantFloat(int32_t value)
 {
   return EncodeImmSingle(value) >= 0;
 }
 
-bool ArmCodegen::InexpensiveConstantLong(int64_t value)
+bool ArmMir2Lir::InexpensiveConstantLong(int64_t value)
 {
   return InexpensiveConstantInt(High32Bits(value)) && InexpensiveConstantInt(Low32Bits(value));
 }
 
-bool ArmCodegen::InexpensiveConstantDouble(int64_t value)
+bool ArmMir2Lir::InexpensiveConstantDouble(int64_t value)
 {
   return EncodeImmDouble(value) >= 0;
 }
@@ -180,57 +179,57 @@
  * 1) r_dest is freshly returned from AllocTemp or
  * 2) The codegen is under fixed register usage
  */
-LIR* ArmCodegen::LoadConstantNoClobber(CompilationUnit* cu, int r_dest, int value)
+LIR* ArmMir2Lir::LoadConstantNoClobber(int r_dest, int value)
 {
   LIR* res;
   int mod_imm;
 
   if (ARM_FPREG(r_dest)) {
-    return LoadFPConstantValue(cu, r_dest, value);
+    return LoadFPConstantValue(r_dest, value);
   }
 
   /* See if the value can be constructed cheaply */
   if (ARM_LOWREG(r_dest) && (value >= 0) && (value <= 255)) {
-    return NewLIR2(cu, kThumbMovImm, r_dest, value);
+    return NewLIR2(kThumbMovImm, r_dest, value);
   }
   /* Check Modified immediate special cases */
   mod_imm = ModifiedImmediate(value);
   if (mod_imm >= 0) {
-    res = NewLIR2(cu, kThumb2MovImmShift, r_dest, mod_imm);
+    res = NewLIR2(kThumb2MovImmShift, r_dest, mod_imm);
     return res;
   }
   mod_imm = ModifiedImmediate(~value);
   if (mod_imm >= 0) {
-    res = NewLIR2(cu, kThumb2MvnImm12, r_dest, mod_imm);
+    res = NewLIR2(kThumb2MvnImm12, r_dest, mod_imm);
     return res;
   }
   /* 16-bit immediate? */
   if ((value & 0xffff) == value) {
-    res = NewLIR2(cu, kThumb2MovImm16, r_dest, value);
+    res = NewLIR2(kThumb2MovImm16, r_dest, value);
     return res;
   }
   /* Do a low/high pair */
-  res = NewLIR2(cu, kThumb2MovImm16, r_dest, Low16Bits(value));
-  NewLIR2(cu, kThumb2MovImm16H, r_dest, High16Bits(value));
+  res = NewLIR2(kThumb2MovImm16, r_dest, Low16Bits(value));
+  NewLIR2(kThumb2MovImm16H, r_dest, High16Bits(value));
   return res;
 }
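 
 // For example, a constant such as 0x12345678 takes the low/high pair path
 // above: kThumb2MovImm16 (movw) writes the low halfword 0x5678, then
 // kThumb2MovImm16H (movt) writes the high halfword 0x1234 (assuming these
 // opcodes map to the usual Thumb2 movw/movt encodings).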
 
-LIR* ArmCodegen::OpUnconditionalBranch(CompilationUnit* cu, LIR* target)
+LIR* ArmMir2Lir::OpUnconditionalBranch(LIR* target)
 {
-  LIR* res = NewLIR1(cu, kThumbBUncond, 0 /* offset to be patched  during assembly*/);
+  LIR* res = NewLIR1(kThumbBUncond, 0 /* offset to be patched during assembly */);
   res->target = target;
   return res;
 }
 
-LIR* ArmCodegen::OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target)
+LIR* ArmMir2Lir::OpCondBranch(ConditionCode cc, LIR* target)
 {
-  LIR* branch = NewLIR2(cu, kThumb2BCond, 0 /* offset to be patched */,
+  LIR* branch = NewLIR2(kThumb2BCond, 0 /* offset to be patched */,
                         ArmConditionEncoding(cc));
   branch->target = target;
   return branch;
 }
 
-LIR* ArmCodegen::OpReg(CompilationUnit* cu, OpKind op, int r_dest_src)
+LIR* ArmMir2Lir::OpReg(OpKind op, int r_dest_src)
 {
   ArmOpcode opcode = kThumbBkpt;
   switch (op) {
@@ -240,10 +239,10 @@
     default:
       LOG(FATAL) << "Bad opcode " << op;
   }
-  return NewLIR1(cu, opcode, r_dest_src);
+  return NewLIR1(opcode, r_dest_src);
 }
 
-LIR* ArmCodegen::OpRegRegShift(CompilationUnit* cu, OpKind op, int r_dest_src1, int r_src2,
+LIR* ArmMir2Lir::OpRegRegShift(OpKind op, int r_dest_src1, int r_src2,
                                int shift)
 {
   bool thumb_form = ((shift == 0) && ARM_LOWREG(r_dest_src1) && ARM_LOWREG(r_src2));
@@ -332,39 +331,39 @@
       break;
     case kOp2Byte:
       DCHECK_EQ(shift, 0);
-      return NewLIR4(cu, kThumb2Sbfx, r_dest_src1, r_src2, 0, 8);
+      return NewLIR4(kThumb2Sbfx, r_dest_src1, r_src2, 0, 8);
     case kOp2Short:
       DCHECK_EQ(shift, 0);
-      return NewLIR4(cu, kThumb2Sbfx, r_dest_src1, r_src2, 0, 16);
+      return NewLIR4(kThumb2Sbfx, r_dest_src1, r_src2, 0, 16);
     case kOp2Char:
       DCHECK_EQ(shift, 0);
-      return NewLIR4(cu, kThumb2Ubfx, r_dest_src1, r_src2, 0, 16);
+      return NewLIR4(kThumb2Ubfx, r_dest_src1, r_src2, 0, 16);
     default:
       LOG(FATAL) << "Bad opcode: " << op;
       break;
   }
   DCHECK_GE(static_cast<int>(opcode), 0);
   if (EncodingMap[opcode].flags & IS_BINARY_OP)
-    return NewLIR2(cu, opcode, r_dest_src1, r_src2);
+    return NewLIR2(opcode, r_dest_src1, r_src2);
   else if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
     if (EncodingMap[opcode].field_loc[2].kind == kFmtShift)
-      return NewLIR3(cu, opcode, r_dest_src1, r_src2, shift);
+      return NewLIR3(opcode, r_dest_src1, r_src2, shift);
     else
-      return NewLIR3(cu, opcode, r_dest_src1, r_dest_src1, r_src2);
+      return NewLIR3(opcode, r_dest_src1, r_dest_src1, r_src2);
   } else if (EncodingMap[opcode].flags & IS_QUAD_OP)
-    return NewLIR4(cu, opcode, r_dest_src1, r_dest_src1, r_src2, shift);
+    return NewLIR4(opcode, r_dest_src1, r_dest_src1, r_src2, shift);
   else {
     LOG(FATAL) << "Unexpected encoding operand count";
     return NULL;
   }
 }
 
-LIR* ArmCodegen::OpRegReg(CompilationUnit* cu, OpKind op, int r_dest_src1, int r_src2)
+LIR* ArmMir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2)
 {
-  return OpRegRegShift(cu, op, r_dest_src1, r_src2, 0);
+  return OpRegRegShift(op, r_dest_src1, r_src2, 0);
 }
 
-LIR* ArmCodegen::OpRegRegRegShift(CompilationUnit* cu, OpKind op, int r_dest, int r_src1,
+LIR* ArmMir2Lir::OpRegRegRegShift(OpKind op, int r_dest, int r_src1,
                                   int r_src2, int shift)
 {
   ArmOpcode opcode = kThumbBkpt;
@@ -424,19 +423,19 @@
   }
   DCHECK_GE(static_cast<int>(opcode), 0);
   if (EncodingMap[opcode].flags & IS_QUAD_OP)
-    return NewLIR4(cu, opcode, r_dest, r_src1, r_src2, shift);
+    return NewLIR4(opcode, r_dest, r_src1, r_src2, shift);
   else {
     DCHECK(EncodingMap[opcode].flags & IS_TERTIARY_OP);
-    return NewLIR3(cu, opcode, r_dest, r_src1, r_src2);
+    return NewLIR3(opcode, r_dest, r_src1, r_src2);
   }
 }
 
-LIR* ArmCodegen::OpRegRegReg(CompilationUnit* cu, OpKind op, int r_dest, int r_src1, int r_src2)
+LIR* ArmMir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2)
 {
-  return OpRegRegRegShift(cu, op, r_dest, r_src1, r_src2, 0);
+  return OpRegRegRegShift(op, r_dest, r_src1, r_src2, 0);
 }
 
-LIR* ArmCodegen::OpRegRegImm(CompilationUnit* cu, OpKind op, int r_dest, int r_src1, int value)
+LIR* ArmMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value)
 {
   LIR* res;
   bool neg = (value < 0);
@@ -450,28 +449,28 @@
   switch (op) {
     case kOpLsl:
       if (all_low_regs)
-        return NewLIR3(cu, kThumbLslRRI5, r_dest, r_src1, value);
+        return NewLIR3(kThumbLslRRI5, r_dest, r_src1, value);
       else
-        return NewLIR3(cu, kThumb2LslRRI5, r_dest, r_src1, value);
+        return NewLIR3(kThumb2LslRRI5, r_dest, r_src1, value);
     case kOpLsr:
       if (all_low_regs)
-        return NewLIR3(cu, kThumbLsrRRI5, r_dest, r_src1, value);
+        return NewLIR3(kThumbLsrRRI5, r_dest, r_src1, value);
       else
-        return NewLIR3(cu, kThumb2LsrRRI5, r_dest, r_src1, value);
+        return NewLIR3(kThumb2LsrRRI5, r_dest, r_src1, value);
     case kOpAsr:
       if (all_low_regs)
-        return NewLIR3(cu, kThumbAsrRRI5, r_dest, r_src1, value);
+        return NewLIR3(kThumbAsrRRI5, r_dest, r_src1, value);
       else
-        return NewLIR3(cu, kThumb2AsrRRI5, r_dest, r_src1, value);
+        return NewLIR3(kThumb2AsrRRI5, r_dest, r_src1, value);
     case kOpRor:
-      return NewLIR3(cu, kThumb2RorRRI5, r_dest, r_src1, value);
+      return NewLIR3(kThumb2RorRRI5, r_dest, r_src1, value);
     case kOpAdd:
       if (ARM_LOWREG(r_dest) && (r_src1 == r13sp) &&
         (value <= 1020) && ((value & 0x3) == 0)) {
-        return NewLIR3(cu, kThumbAddSpRel, r_dest, r_src1, value >> 2);
+        return NewLIR3(kThumbAddSpRel, r_dest, r_src1, value >> 2);
       } else if (ARM_LOWREG(r_dest) && (r_src1 == r15pc) &&
           (value <= 1020) && ((value & 0x3) == 0)) {
-        return NewLIR3(cu, kThumbAddPcRel, r_dest, r_src1, value >> 2);
+        return NewLIR3(kThumbAddPcRel, r_dest, r_src1, value >> 2);
       }
       // Note: intentional fallthrough
     case kOpSub:
@@ -480,13 +479,13 @@
           opcode = (neg) ? kThumbSubRRI3 : kThumbAddRRI3;
         else
           opcode = (neg) ? kThumbAddRRI3 : kThumbSubRRI3;
-        return NewLIR3(cu, opcode, r_dest, r_src1, abs_value);
+        return NewLIR3(opcode, r_dest, r_src1, abs_value);
       } else if ((abs_value & 0xff) == abs_value) {
         if (op == kOpAdd)
           opcode = (neg) ? kThumb2SubRRI12 : kThumb2AddRRI12;
         else
           opcode = (neg) ? kThumb2AddRRI12 : kThumb2SubRRI12;
-        return NewLIR3(cu, opcode, r_dest, r_src1, abs_value);
+        return NewLIR3(opcode, r_dest, r_src1, abs_value);
       }
       if (mod_imm_neg >= 0) {
         op = (op == kOpAdd) ? kOpSub : kOpAdd;
@@ -533,12 +532,12 @@
       int mod_imm = ModifiedImmediate(value);
       LIR* res;
       if (mod_imm >= 0) {
-        res = NewLIR2(cu, kThumb2CmpRI12, r_src1, mod_imm);
+        res = NewLIR2(kThumb2CmpRI12, r_src1, mod_imm);
       } else {
-        int r_tmp = AllocTemp(cu);
-        res = LoadConstant(cu, r_tmp, value);
-        OpRegReg(cu, kOpCmp, r_src1, r_tmp);
-        FreeTemp(cu, r_tmp);
+        int r_tmp = AllocTemp();
+        res = LoadConstant(r_tmp, value);
+        OpRegReg(kOpCmp, r_src1, r_tmp);
+        FreeTemp(r_tmp);
       }
       return res;
     }
@@ -547,21 +546,21 @@
   }
 
   if (mod_imm >= 0) {
-    return NewLIR3(cu, opcode, r_dest, r_src1, mod_imm);
+    return NewLIR3(opcode, r_dest, r_src1, mod_imm);
   } else {
-    int r_scratch = AllocTemp(cu);
-    LoadConstant(cu, r_scratch, value);
+    int r_scratch = AllocTemp();
+    LoadConstant(r_scratch, value);
     if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
-      res = NewLIR4(cu, alt_opcode, r_dest, r_src1, r_scratch, 0);
+      res = NewLIR4(alt_opcode, r_dest, r_src1, r_scratch, 0);
     else
-      res = NewLIR3(cu, alt_opcode, r_dest, r_src1, r_scratch);
-    FreeTemp(cu, r_scratch);
+      res = NewLIR3(alt_opcode, r_dest, r_src1, r_scratch);
+    FreeTemp(r_scratch);
     return res;
   }
 }
 
 /* Handle Thumb-only variants here - otherwise punt to OpRegRegImm */
-LIR* ArmCodegen::OpRegImm(CompilationUnit* cu, OpKind op, int r_dest_src1, int value)
+LIR* ArmMir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value)
 {
   bool neg = (value < 0);
   int abs_value = (neg) ? -value : value;
@@ -571,7 +570,7 @@
     case kOpAdd:
       if (!neg && (r_dest_src1 == r13sp) && (value <= 508)) { /* sp */
         DCHECK_EQ((value & 0x3), 0);
-        return NewLIR1(cu, kThumbAddSpI7, value >> 2);
+        return NewLIR1(kThumbAddSpI7, value >> 2);
       } else if (short_form) {
         opcode = (neg) ? kThumbSubRI8 : kThumbAddRI8;
       }
@@ -579,7 +578,7 @@
     case kOpSub:
       if (!neg && (r_dest_src1 == r13sp) && (value <= 508)) { /* sp */
         DCHECK_EQ((value & 0x3), 0);
-        return NewLIR1(cu, kThumbSubSpI7, value >> 2);
+        return NewLIR1(kThumbSubSpI7, value >> 2);
       } else if (short_form) {
         opcode = (neg) ? kThumbAddRI8 : kThumbSubRI8;
       }
@@ -600,13 +599,13 @@
       break;
   }
   if (short_form)
-    return NewLIR2(cu, opcode, r_dest_src1, abs_value);
+    return NewLIR2(opcode, r_dest_src1, abs_value);
   else {
-    return OpRegRegImm(cu, op, r_dest_src1, r_dest_src1, value);
+    return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
   }
 }
 
-LIR* ArmCodegen::LoadConstantWide(CompilationUnit* cu, int r_dest_lo, int r_dest_hi, int64_t value)
+LIR* ArmMir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value)
 {
   LIR* res = NULL;
   int32_t val_lo = Low32Bits(value);
@@ -617,46 +616,46 @@
       // TODO: we need better info about the target CPU.  A vector exclusive-or
       //       would probably be better here if we could rely on its existence.
       // Load an immediate +2.0 (which encodes to 0)
-      NewLIR2(cu, kThumb2Vmovd_IMM8, target_reg, 0);
+      NewLIR2(kThumb2Vmovd_IMM8, target_reg, 0);
       // +0.0 = +2.0 - +2.0
-      res = NewLIR3(cu, kThumb2Vsubd, target_reg, target_reg, target_reg);
+      res = NewLIR3(kThumb2Vsubd, target_reg, target_reg, target_reg);
     } else {
       int encoded_imm = EncodeImmDouble(value);
       if (encoded_imm >= 0) {
-        res = NewLIR2(cu, kThumb2Vmovd_IMM8, target_reg, encoded_imm);
+        res = NewLIR2(kThumb2Vmovd_IMM8, target_reg, encoded_imm);
       }
     }
   } else {
     if ((InexpensiveConstantInt(val_lo) && (InexpensiveConstantInt(val_hi)))) {
-      res = LoadConstantNoClobber(cu, r_dest_lo, val_lo);
-      LoadConstantNoClobber(cu, r_dest_hi, val_hi);
+      res = LoadConstantNoClobber(r_dest_lo, val_lo);
+      LoadConstantNoClobber(r_dest_hi, val_hi);
     }
   }
   if (res == NULL) {
     // No short form - load from the literal pool.
-    LIR* data_target = ScanLiteralPoolWide(cu->literal_list, val_lo, val_hi);
+    LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
     if (data_target == NULL) {
-      data_target = AddWideData(cu, &cu->literal_list, val_lo, val_hi);
+      data_target = AddWideData(&literal_list_, val_lo, val_hi);
     }
     if (ARM_FPREG(r_dest_lo)) {
-      res = RawLIR(cu, cu->current_dalvik_offset, kThumb2Vldrd,
+      res = RawLIR(current_dalvik_offset_, kThumb2Vldrd,
                    target_reg, r15pc, 0, 0, 0, data_target);
     } else {
-      res = RawLIR(cu, cu->current_dalvik_offset, kThumb2LdrdPcRel8,
+      res = RawLIR(current_dalvik_offset_, kThumb2LdrdPcRel8,
                    r_dest_lo, r_dest_hi, r15pc, 0, 0, data_target);
     }
-    SetMemRefType(cu, res, true, kLiteral);
+    SetMemRefType(res, true, kLiteral);
     res->alias_info = reinterpret_cast<uintptr_t>(data_target);
-    AppendLIR(cu, res);
+    AppendLIR(res);
   }
   return res;
 }
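 
 // E.g. for a core-register pair, an int64 constant such as 0x0000000100000002,
 // both of whose halves are cheap to materialize, becomes two 32-bit loads
 // (2 into r_dest_lo, 1 into r_dest_hi); anything else falls back to a
 // pc-relative load from the literal pool.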
 
-int ArmCodegen::EncodeShift(int code, int amount) {
+int ArmMir2Lir::EncodeShift(int code, int amount) {
   return ((amount & 0x1f) << 2) | code;
 }
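 
 // E.g. EncodeShift(kArmLsl, 2) packs the amount and type as (2 << 2) | kArmLsl;
 // callers such as LoadBaseIndexed below pass the result as the shift operand
 // of kThumb2AddRRR (assuming the standard ARM shift-type codes, where LSL is 0).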
 
-LIR* ArmCodegen::LoadBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_dest,
+LIR* ArmMir2Lir::LoadBaseIndexed(int rBase, int r_index, int r_dest,
                                  int scale, OpSize size)
 {
   bool all_low_regs = ARM_LOWREG(rBase) && ARM_LOWREG(r_index) && ARM_LOWREG(r_dest);
@@ -685,15 +684,15 @@
   switch (size) {
     case kDouble: // fall-through
     case kSingle:
-      reg_ptr = AllocTemp(cu);
+      reg_ptr = AllocTemp();
       if (scale) {
-        NewLIR4(cu, kThumb2AddRRR, reg_ptr, rBase, r_index,
+        NewLIR4(kThumb2AddRRR, reg_ptr, rBase, r_index,
                 EncodeShift(kArmLsl, scale));
       } else {
-        OpRegRegReg(cu, kOpAdd, reg_ptr, rBase, r_index);
+        OpRegRegReg(kOpAdd, reg_ptr, rBase, r_index);
       }
-      load = NewLIR3(cu, opcode, r_dest, reg_ptr, 0);
-      FreeTemp(cu, reg_ptr);
+      load = NewLIR3(opcode, r_dest, reg_ptr, 0);
+      FreeTemp(reg_ptr);
       return load;
     case kWord:
       opcode = (thumb_form) ? kThumbLdrRRR : kThumb2LdrRRR;
@@ -714,14 +713,14 @@
       LOG(FATAL) << "Bad size: " << size;
   }
   if (thumb_form)
-    load = NewLIR3(cu, opcode, r_dest, rBase, r_index);
+    load = NewLIR3(opcode, r_dest, rBase, r_index);
   else
-    load = NewLIR4(cu, opcode, r_dest, rBase, r_index, scale);
+    load = NewLIR4(opcode, r_dest, rBase, r_index, scale);
 
   return load;
 }
 
-LIR* ArmCodegen::StoreBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_src,
+LIR* ArmMir2Lir::StoreBaseIndexed(int rBase, int r_index, int r_src,
                                   int scale, OpSize size)
 {
   bool all_low_regs = ARM_LOWREG(rBase) && ARM_LOWREG(r_index) && ARM_LOWREG(r_src);
@@ -750,15 +749,15 @@
   switch (size) {
     case kDouble: // fall-through
     case kSingle:
-      reg_ptr = AllocTemp(cu);
+      reg_ptr = AllocTemp();
       if (scale) {
-        NewLIR4(cu, kThumb2AddRRR, reg_ptr, rBase, r_index,
+        NewLIR4(kThumb2AddRRR, reg_ptr, rBase, r_index,
                 EncodeShift(kArmLsl, scale));
       } else {
-        OpRegRegReg(cu, kOpAdd, reg_ptr, rBase, r_index);
+        OpRegRegReg(kOpAdd, reg_ptr, rBase, r_index);
       }
-      store = NewLIR3(cu, opcode, r_src, reg_ptr, 0);
-      FreeTemp(cu, reg_ptr);
+      store = NewLIR3(opcode, r_src, reg_ptr, 0);
+      FreeTemp(reg_ptr);
       return store;
     case kWord:
       opcode = (thumb_form) ? kThumbStrRRR : kThumb2StrRRR;
@@ -775,9 +774,9 @@
       LOG(FATAL) << "Bad size: " << size;
   }
   if (thumb_form)
-    store = NewLIR3(cu, opcode, r_src, rBase, r_index);
+    store = NewLIR3(opcode, r_src, rBase, r_index);
   else
-    store = NewLIR4(cu, opcode, r_src, rBase, r_index, scale);
+    store = NewLIR4(opcode, r_src, rBase, r_index, scale);
 
   return store;
 }
@@ -787,10 +786,9 @@
  * on base (which must have an associated s_reg and MIR).  If not
  * performing null check, incoming MIR can be null.
  */
-LIR* ArmCodegen::LoadBaseDispBody(CompilationUnit* cu, int rBase, int displacement, int r_dest,
+LIR* ArmMir2Lir::LoadBaseDispBody(int rBase, int displacement, int r_dest,
                                   int r_dest_hi, OpSize size, int s_reg)
 {
-  Codegen* cg = cu->cg.get();
   LIR* load = NULL;
   ArmOpcode opcode = kThumbBkpt;
   bool short_form = false;
@@ -806,7 +804,7 @@
       if (ARM_FPREG(r_dest)) {
         if (ARM_SINGLEREG(r_dest)) {
           DCHECK(ARM_FPREG(r_dest_hi));
-          r_dest = cg->S2d(r_dest, r_dest_hi);
+          r_dest = S2d(r_dest, r_dest_hi);
         }
         opcode = kThumb2Vldrd;
         if (displacement <= 1020) {
@@ -816,11 +814,11 @@
         break;
       } else {
         if (displacement <= 1020) {
-          load = NewLIR4(cu, kThumb2LdrdI8, r_dest, r_dest_hi, rBase, displacement >> 2);
+          load = NewLIR4(kThumb2LdrdI8, r_dest, r_dest_hi, rBase, displacement >> 2);
         } else {
-          load = LoadBaseDispBody(cu, rBase, displacement, r_dest,
+          load = LoadBaseDispBody(rBase, displacement, r_dest,
                                  -1, kWord, s_reg);
-          LoadBaseDispBody(cu, rBase, displacement + 4, r_dest_hi,
+          LoadBaseDispBody(rBase, displacement + 4, r_dest_hi,
                            -1, kWord, INVALID_SREG);
         }
         already_generated = true;
@@ -893,38 +891,37 @@
 
   if (!already_generated) {
     if (short_form) {
-      load = NewLIR3(cu, opcode, r_dest, rBase, encoded_disp);
+      load = NewLIR3(opcode, r_dest, rBase, encoded_disp);
     } else {
-      int reg_offset = AllocTemp(cu);
-      cg->LoadConstant(cu, reg_offset, encoded_disp);
-      load = cg->LoadBaseIndexed(cu, rBase, reg_offset, r_dest, 0, size);
-      FreeTemp(cu, reg_offset);
+      int reg_offset = AllocTemp();
+      LoadConstant(reg_offset, encoded_disp);
+      load = LoadBaseIndexed(rBase, reg_offset, r_dest, 0, size);
+      FreeTemp(reg_offset);
     }
   }
 
   // TODO: in future may need to differentiate Dalvik accesses w/ spills
   if (rBase == rARM_SP) {
-    AnnotateDalvikRegAccess(cu, load, displacement >> 2, true /* is_load */, is64bit);
+    AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, is64bit);
   }
   return load;
 }
 
-LIR* ArmCodegen::LoadBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_dest,
+LIR* ArmMir2Lir::LoadBaseDisp(int rBase, int displacement, int r_dest,
                               OpSize size, int s_reg)
 {
-  return LoadBaseDispBody(cu, rBase, displacement, r_dest, -1, size, s_reg);
+  return LoadBaseDispBody(rBase, displacement, r_dest, -1, size, s_reg);
 }
 
-LIR* ArmCodegen::LoadBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_dest_lo,
+LIR* ArmMir2Lir::LoadBaseDispWide(int rBase, int displacement, int r_dest_lo,
                                   int r_dest_hi, int s_reg)
 {
-  return LoadBaseDispBody(cu, rBase, displacement, r_dest_lo, r_dest_hi, kLong, s_reg);
+  return LoadBaseDispBody(rBase, displacement, r_dest_lo, r_dest_hi, kLong, s_reg);
 }
 
 
-LIR* ArmCodegen::StoreBaseDispBody(CompilationUnit* cu, int rBase, int displacement,
+LIR* ArmMir2Lir::StoreBaseDispBody(int rBase, int displacement,
                                    int r_src, int r_src_hi, OpSize size) {
-  Codegen* cg = cu->cg.get();
   LIR* store = NULL;
   ArmOpcode opcode = kThumbBkpt;
   bool short_form = false;
@@ -939,16 +936,16 @@
       is64bit = true;
       if (!ARM_FPREG(r_src)) {
         if (displacement <= 1020) {
-          store = NewLIR4(cu, kThumb2StrdI8, r_src, r_src_hi, rBase, displacement >> 2);
+          store = NewLIR4(kThumb2StrdI8, r_src, r_src_hi, rBase, displacement >> 2);
         } else {
-          store = StoreBaseDispBody(cu, rBase, displacement, r_src, -1, kWord);
-          StoreBaseDispBody(cu, rBase, displacement + 4, r_src_hi, -1, kWord);
+          store = StoreBaseDispBody(rBase, displacement, r_src, -1, kWord);
+          StoreBaseDispBody(rBase, displacement + 4, r_src_hi, -1, kWord);
         }
         already_generated = true;
       } else {
         if (ARM_SINGLEREG(r_src)) {
           DCHECK(ARM_FPREG(r_src_hi));
-          r_src = cg->S2d(r_src, r_src_hi);
+          r_src = S2d(r_src, r_src_hi);
         }
         opcode = kThumb2Vstrd;
         if (displacement <= 1020) {
@@ -1010,35 +1007,35 @@
   }
   if (!already_generated) {
     if (short_form) {
-      store = NewLIR3(cu, opcode, r_src, rBase, encoded_disp);
+      store = NewLIR3(opcode, r_src, rBase, encoded_disp);
     } else {
-      int r_scratch = AllocTemp(cu);
-      cg->LoadConstant(cu, r_scratch, encoded_disp);
-      store = cg->StoreBaseIndexed(cu, rBase, r_scratch, r_src, 0, size);
-      FreeTemp(cu, r_scratch);
+      int r_scratch = AllocTemp();
+      LoadConstant(r_scratch, encoded_disp);
+      store = StoreBaseIndexed(rBase, r_scratch, r_src, 0, size);
+      FreeTemp(r_scratch);
     }
   }
 
   // TODO: In future, may need to differentiate Dalvik & spill accesses
   if (rBase == rARM_SP) {
-    AnnotateDalvikRegAccess(cu, store, displacement >> 2, false /* is_load */, is64bit);
+    AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, is64bit);
   }
   return store;
 }
 
-LIR* ArmCodegen::StoreBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_src,
+LIR* ArmMir2Lir::StoreBaseDisp(int rBase, int displacement, int r_src,
                                OpSize size)
 {
-  return StoreBaseDispBody(cu, rBase, displacement, r_src, -1, size);
+  return StoreBaseDispBody(rBase, displacement, r_src, -1, size);
 }
 
-LIR* ArmCodegen::StoreBaseDispWide(CompilationUnit* cu, int rBase, int displacement,
+LIR* ArmMir2Lir::StoreBaseDispWide(int rBase, int displacement,
                                    int r_src_lo, int r_src_hi)
 {
-  return StoreBaseDispBody(cu, rBase, displacement, r_src_lo, r_src_hi, kLong);
+  return StoreBaseDispBody(rBase, displacement, r_src_lo, r_src_hi, kLong);
 }
 
-LIR* ArmCodegen::OpFpRegCopy(CompilationUnit* cu, int r_dest, int r_src)
+LIR* ArmMir2Lir::OpFpRegCopy(int r_dest, int r_src)
 {
   int opcode;
   DCHECK_EQ(ARM_DOUBLEREG(r_dest), ARM_DOUBLEREG(r_src));
@@ -1052,26 +1049,26 @@
       opcode = kThumb2Fmrs;
     }
   }
-  LIR* res = RawLIR(cu, cu->current_dalvik_offset, opcode, r_dest, r_src);
-  if (!(cu->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
+  LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest, r_src);
+  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
     res->flags.is_nop = true;
   }
   return res;
 }
 
-LIR* ArmCodegen::OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset)
+LIR* ArmMir2Lir::OpThreadMem(OpKind op, int thread_offset)
 {
   LOG(FATAL) << "Unexpected use of OpThreadMem for Arm";
   return NULL;
 }
 
-LIR* ArmCodegen::OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp)
+LIR* ArmMir2Lir::OpMem(OpKind op, int rBase, int disp)
 {
   LOG(FATAL) << "Unexpected use of OpMem for Arm";
   return NULL;
 }
 
-LIR* ArmCodegen::StoreBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+LIR* ArmMir2Lir::StoreBaseIndexedDisp(int rBase, int r_index, int scale,
                                       int displacement, int r_src, int r_src_hi, OpSize size,
                                       int s_reg)
 {
@@ -1079,13 +1076,13 @@
   return NULL;
 }
 
-LIR* ArmCodegen::OpRegMem(CompilationUnit *cu, OpKind op, int r_dest, int rBase, int offset)
+LIR* ArmMir2Lir::OpRegMem(OpKind op, int r_dest, int rBase, int offset)
 {
   LOG(FATAL) << "Unexpected use of OpRegMem for Arm";
   return NULL;
 }
 
-LIR* ArmCodegen::LoadBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+LIR* ArmMir2Lir::LoadBaseIndexedDisp(int rBase, int r_index, int scale,
                                      int displacement, int r_dest, int r_dest_hi, OpSize size,
                                      int s_reg)
 {
diff --git a/src/compiler/dex/quick/codegen.h b/src/compiler/dex/quick/codegen.h
deleted file mode 100644
index 272ccad..0000000
--- a/src/compiler/dex/quick/codegen.h
+++ /dev/null
@@ -1,434 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_SRC_COMPILER_DEX_QUICK_CODEGEN_H_
-#define ART_SRC_COMPILER_DEX_QUICK_CODEGEN_H_
-
-#include "invoke_type.h"
-#include "compiler/dex/compiler_enums.h"
-#include "compiler/dex/compiler_ir.h"
-
-namespace art {
-
-// Set to 1 to measure cost of suspend check.
-#define NO_SUSPEND 0
-
-#define IS_BINARY_OP         (1ULL << kIsBinaryOp)
-#define IS_BRANCH            (1ULL << kIsBranch)
-#define IS_IT                (1ULL << kIsIT)
-#define IS_LOAD              (1ULL << kMemLoad)
-#define IS_QUAD_OP           (1ULL << kIsQuadOp)
-#define IS_QUIN_OP           (1ULL << kIsQuinOp)
-#define IS_SEXTUPLE_OP       (1ULL << kIsSextupleOp)
-#define IS_STORE             (1ULL << kMemStore)
-#define IS_TERTIARY_OP       (1ULL << kIsTertiaryOp)
-#define IS_UNARY_OP          (1ULL << kIsUnaryOp)
-#define NEEDS_FIXUP          (1ULL << kPCRelFixup)
-#define NO_OPERAND           (1ULL << kNoOperand)
-#define REG_DEF0             (1ULL << kRegDef0)
-#define REG_DEF1             (1ULL << kRegDef1)
-#define REG_DEFA             (1ULL << kRegDefA)
-#define REG_DEFD             (1ULL << kRegDefD)
-#define REG_DEF_FPCS_LIST0   (1ULL << kRegDefFPCSList0)
-#define REG_DEF_FPCS_LIST2   (1ULL << kRegDefFPCSList2)
-#define REG_DEF_LIST0        (1ULL << kRegDefList0)
-#define REG_DEF_LIST1        (1ULL << kRegDefList1)
-#define REG_DEF_LR           (1ULL << kRegDefLR)
-#define REG_DEF_SP           (1ULL << kRegDefSP)
-#define REG_USE0             (1ULL << kRegUse0)
-#define REG_USE1             (1ULL << kRegUse1)
-#define REG_USE2             (1ULL << kRegUse2)
-#define REG_USE3             (1ULL << kRegUse3)
-#define REG_USE4             (1ULL << kRegUse4)
-#define REG_USEA             (1ULL << kRegUseA)
-#define REG_USEC             (1ULL << kRegUseC)
-#define REG_USED             (1ULL << kRegUseD)
-#define REG_USE_FPCS_LIST0   (1ULL << kRegUseFPCSList0)
-#define REG_USE_FPCS_LIST2   (1ULL << kRegUseFPCSList2)
-#define REG_USE_LIST0        (1ULL << kRegUseList0)
-#define REG_USE_LIST1        (1ULL << kRegUseList1)
-#define REG_USE_LR           (1ULL << kRegUseLR)
-#define REG_USE_PC           (1ULL << kRegUsePC)
-#define REG_USE_SP           (1ULL << kRegUseSP)
-#define SETS_CCODES          (1ULL << kSetsCCodes)
-#define USES_CCODES          (1ULL << kUsesCCodes)
-
-// Common combo register usage patterns.
-#define REG_DEF01            (REG_DEF0 | REG_DEF1)
-#define REG_DEF01_USE2       (REG_DEF0 | REG_DEF1 | REG_USE2)
-#define REG_DEF0_USE01       (REG_DEF0 | REG_USE01)
-#define REG_DEF0_USE0        (REG_DEF0 | REG_USE0)
-#define REG_DEF0_USE12       (REG_DEF0 | REG_USE12)
-#define REG_DEF0_USE1        (REG_DEF0 | REG_USE1)
-#define REG_DEF0_USE2        (REG_DEF0 | REG_USE2)
-#define REG_DEFAD_USEAD      (REG_DEFAD_USEA | REG_USED)
-#define REG_DEFAD_USEA       (REG_DEFA_USEA | REG_DEFD)
-#define REG_DEFA_USEA        (REG_DEFA | REG_USEA)
-#define REG_USE012           (REG_USE01 | REG_USE2)
-#define REG_USE014           (REG_USE01 | REG_USE4)
-#define REG_USE01            (REG_USE0 | REG_USE1)
-#define REG_USE02            (REG_USE0 | REG_USE2)
-#define REG_USE12            (REG_USE1 | REG_USE2)
-#define REG_USE23            (REG_USE2 | REG_USE3)
-
-struct BasicBlock;
-struct CallInfo;
-struct CompilationUnit;
-struct LIR;
-struct MIR;
-struct RegLocation;
-struct RegisterInfo;
-
-typedef int (*NextCallInsn)(CompilationUnit*, CallInfo*, int, uint32_t dex_idx,
-                            uint32_t method_idx, uintptr_t direct_code,
-                            uintptr_t direct_method, InvokeType type);
-
-// Target-specific initialization.
-bool InitArmCodegen(CompilationUnit* cu);
-bool InitMipsCodegen(CompilationUnit* cu);
-bool InitX86Codegen(CompilationUnit* cu);
-
-class Codegen {
-
-  public:
-
-    struct SwitchTable {
-      int offset;
-      const uint16_t* table;      // Original dex table.
-      int vaddr;                  // Dalvik offset of switch opcode.
-      LIR* anchor;                // Reference instruction for relative offsets.
-      LIR** targets;              // Array of case targets.
-    };
-
-    struct FillArrayData {
-      int offset;
-      const uint16_t* table;      // Original dex table.
-      int size;
-      int vaddr;                  // Dalvik offset of FILL_ARRAY_DATA opcode.
-    };
-
-    virtual ~Codegen(){};
-
-    // Shared by all targets - implemented in ralloc_util.cc
-    void SimpleRegAlloc(CompilationUnit* cu);
-
-    // Shared by all targets - implemented in gen_common.cc.
-    void HandleSuspendLaunchPads(CompilationUnit *cu);
-    void HandleIntrinsicLaunchPads(CompilationUnit *cu);
-    void HandleThrowLaunchPads(CompilationUnit *cu);
-    void GenBarrier(CompilationUnit* cu);
-    LIR* GenCheck(CompilationUnit* cu, ConditionCode c_code, ThrowKind kind);
-    LIR* GenImmedCheck(CompilationUnit* cu, ConditionCode c_code, int reg, int imm_val,
-                       ThrowKind kind);
-    LIR* GenNullCheck(CompilationUnit* cu, int s_reg, int m_reg, int opt_flags);
-    LIR* GenRegRegCheck(CompilationUnit* cu, ConditionCode c_code, int reg1, int reg2,
-                        ThrowKind kind);
-    void GenCompareAndBranch(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_src1,
-                             RegLocation rl_src2, LIR* taken, LIR* fall_through);
-    void GenCompareZeroAndBranch(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_src,
-                                 LIR* taken, LIR* fall_through);
-    void GenIntToLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
-    void GenIntNarrowing(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
-                         RegLocation rl_src);
-    void GenNewArray(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest,
-                     RegLocation rl_src);
-    void GenFilledNewArray(CompilationUnit* cu, CallInfo* info);
-    void GenSput(CompilationUnit* cu, uint32_t field_idx, RegLocation rl_src,
-                 bool is_long_or_double, bool is_object);
-    void GenSget(CompilationUnit* cu, uint32_t field_idx, RegLocation rl_dest,
-                 bool is_long_or_double, bool is_object);
-    void GenShowTarget(CompilationUnit* cu);
-    void GenIGet(CompilationUnit* cu, uint32_t field_idx, int opt_flags, OpSize size,
-                 RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double, bool is_object);
-    void GenIPut(CompilationUnit* cu, uint32_t field_idx, int opt_flags, OpSize size,
-                 RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double, bool is_object);
-    void GenConstClass(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest);
-    void GenConstString(CompilationUnit* cu, uint32_t string_idx, RegLocation rl_dest);
-    void GenNewInstance(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest);
-    void GenThrow(CompilationUnit* cu, RegLocation rl_src);
-    void GenInstanceof(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest,
-                       RegLocation rl_src);
-    void GenCheckCast(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_src);
-    void GenLong3Addr(CompilationUnit* cu, OpKind first_op, OpKind second_op, RegLocation rl_dest,
-                      RegLocation rl_src1, RegLocation rl_src2);
-    void GenShiftOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
-                        RegLocation rl_src1, RegLocation rl_shift);
-    void GenArithOpInt(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
-                       RegLocation rl_src1, RegLocation rl_src2);
-    void GenArithOpIntLit(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
-                          RegLocation rl_src, int lit);
-    void GenArithOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
-                        RegLocation rl_src1, RegLocation rl_src2);
-    void GenConversionCall(CompilationUnit* cu, int func_offset, RegLocation rl_dest,
-                           RegLocation rl_src);
-    void GenSuspendTest(CompilationUnit* cu, int opt_flags);
-    void GenSuspendTestAndBranch(CompilationUnit* cu, int opt_flags, LIR* target);
-
-    // Shared by all targets - implemented in gen_invoke.cc.
-    int CallHelperSetup(CompilationUnit* cu, int helper_offset);
-    LIR* CallHelper(CompilationUnit* cu, int r_tgt, int helper_offset, bool safepoint_pc);
-    void CallRuntimeHelperImm(CompilationUnit* cu, int helper_offset, int arg0, bool safepoint_pc);
-    void CallRuntimeHelperReg(CompilationUnit* cu, int helper_offset, int arg0, bool safepoint_pc);
-    void CallRuntimeHelperRegLocation(CompilationUnit* cu, int helper_offset, RegLocation arg0,
-                                       bool safepoint_pc);
-    void CallRuntimeHelperImmImm(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
-                                 bool safepoint_pc);
-    void CallRuntimeHelperImmRegLocation(CompilationUnit* cu, int helper_offset, int arg0,
-                                         RegLocation arg1, bool safepoint_pc);
-    void CallRuntimeHelperRegLocationImm(CompilationUnit* cu, int helper_offset, RegLocation arg0,
-                                         int arg1, bool safepoint_pc);
-    void CallRuntimeHelperImmReg(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
-                                 bool safepoint_pc);
-    void CallRuntimeHelperRegImm(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
-                                 bool safepoint_pc);
-    void CallRuntimeHelperImmMethod(CompilationUnit* cu, int helper_offset, int arg0,
-                                    bool safepoint_pc);
-    void CallRuntimeHelperRegLocationRegLocation(CompilationUnit* cu, int helper_offset,
-                                                 RegLocation arg0, RegLocation arg1,
-                                                 bool safepoint_pc);
-    void CallRuntimeHelperRegReg(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
-                                 bool safepoint_pc);
-    void CallRuntimeHelperRegRegImm(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
-                                    int arg2, bool safepoint_pc);
-    void CallRuntimeHelperImmMethodRegLocation(CompilationUnit* cu, int helper_offset, int arg0,
-                                               RegLocation arg2, bool safepoint_pc);
-    void CallRuntimeHelperImmMethodImm(CompilationUnit* cu, int helper_offset, int arg0, int arg2,
-                                       bool safepoint_pc);
-    void CallRuntimeHelperImmRegLocationRegLocation(CompilationUnit* cu, int helper_offset,
-                                                    int arg0, RegLocation arg1, RegLocation arg2,
-                                                    bool safepoint_pc);
-    void GenInvoke(CompilationUnit* cu, CallInfo* info);
-    void FlushIns(CompilationUnit* cu, RegLocation* ArgLocs, RegLocation rl_method);
-    int GenDalvikArgsNoRange(CompilationUnit* cu, CallInfo* info, int call_state, LIR** pcrLabel,
-                             NextCallInsn next_call_insn, uint32_t dex_idx, uint32_t method_idx,
-                             uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
-                             bool skip_this);
-    int GenDalvikArgsRange(CompilationUnit* cu, CallInfo* info, int call_state, LIR** pcrLabel,
-                           NextCallInsn next_call_insn, uint32_t dex_idx, uint32_t method_idx,
-                           uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
-                           bool skip_this);
-    RegLocation InlineTarget(CompilationUnit* cu, CallInfo* info);
-    RegLocation InlineTargetWide(CompilationUnit* cu, CallInfo* info);
-    CallInfo* NewMemCallInfo(CompilationUnit* cu, BasicBlock* bb, MIR* mir, InvokeType type,
-                             bool is_range);
-    bool GenInlinedCharAt(CompilationUnit* cu, CallInfo* info);
-    bool GenInlinedStringIsEmptyOrLength(CompilationUnit* cu, CallInfo* info, bool is_empty);
-    bool GenInlinedAbsInt(CompilationUnit *cu, CallInfo* info);
-    bool GenInlinedAbsLong(CompilationUnit *cu, CallInfo* info);
-    bool GenInlinedFloatCvt(CompilationUnit *cu, CallInfo* info);
-    bool GenInlinedDoubleCvt(CompilationUnit *cu, CallInfo* info);
-    bool GenInlinedIndexOf(CompilationUnit* cu, CallInfo* info, bool zero_based);
-    bool GenInlinedStringCompareTo(CompilationUnit* cu, CallInfo* info);
-    bool GenInlinedCurrentThread(CompilationUnit* cu, CallInfo* info);
-    bool GenInlinedUnsafeGet(CompilationUnit* cu, CallInfo* info, bool is_long, bool is_volatile);
-    bool GenInlinedUnsafePut(CompilationUnit* cu, CallInfo* info, bool is_long, bool is_object,
-                             bool is_volatile, bool is_ordered);
-    bool GenIntrinsic(CompilationUnit* cu, CallInfo* info);
-
-    // Shared by all targets - implemented in gen_loadstore.cc.
-    RegLocation LoadCurrMethod(CompilationUnit *cu);
-    void LoadCurrMethodDirect(CompilationUnit *cu, int r_tgt);
-    LIR* LoadConstant(CompilationUnit* cu, int r_dest, int value);
-    LIR* LoadWordDisp(CompilationUnit* cu, int rBase, int displacement, int r_dest);
-    RegLocation LoadValue(CompilationUnit* cu, RegLocation rl_src, RegisterClass op_kind);
-    RegLocation LoadValueWide(CompilationUnit* cu, RegLocation rl_src, RegisterClass op_kind);
-    void LoadValueDirect(CompilationUnit* cu, RegLocation rl_src, int r_dest);
-    void LoadValueDirectFixed(CompilationUnit* cu, RegLocation rl_src, int r_dest);
-    void LoadValueDirectWide(CompilationUnit* cu, RegLocation rl_src, int reg_lo, int reg_hi);
-    void LoadValueDirectWideFixed(CompilationUnit* cu, RegLocation rl_src, int reg_lo, int reg_hi);
-    LIR* StoreWordDisp(CompilationUnit* cu, int rBase, int displacement, int r_src);
-    void StoreValue(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
-    void StoreValueWide(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
-
-    // Required for target - codegen helpers.
-    virtual bool SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
-                                    RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
-    virtual int LoadHelper(CompilationUnit* cu, int offset) = 0;
-    virtual LIR* LoadBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_dest,
-                              OpSize size, int s_reg) = 0;
-    virtual LIR* LoadBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_dest_lo,
-                                  int r_dest_hi, int s_reg) = 0;
-    virtual LIR* LoadBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_dest, int scale,
-                                 OpSize size) = 0;
-    virtual LIR* LoadBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
-                                     int displacement, int r_dest, int r_dest_hi, OpSize size,
-                                     int s_reg) = 0;
-    virtual LIR* LoadConstantNoClobber(CompilationUnit* cu, int r_dest, int value) = 0;
-    virtual LIR* LoadConstantWide(CompilationUnit* cu, int r_dest_lo, int r_dest_hi,
-                                  int64_t value) = 0;
-    virtual LIR* StoreBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_src,
-                               OpSize size) = 0;
-    virtual LIR* StoreBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_src_lo,
-                                   int r_src_hi) = 0;
-    virtual LIR* StoreBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_src, int scale,
-                                 OpSize size) = 0;
-    virtual LIR* StoreBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
-                                      int displacement, int r_src, int r_src_hi, OpSize size,
-                                      int s_reg) = 0;
-    virtual void MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg) = 0;
-
-    // Required for target - register utilities.
-    virtual bool IsFpReg(int reg) = 0;
-    virtual bool SameRegType(int reg1, int reg2) = 0;
-    virtual int AllocTypedTemp(CompilationUnit* cu, bool fp_hint, int reg_class) = 0;
-    virtual int AllocTypedTempPair(CompilationUnit* cu, bool fp_hint, int reg_class) = 0;
-    virtual int S2d(int low_reg, int high_reg) = 0;
-    virtual int TargetReg(SpecialTargetRegister reg) = 0;
-    virtual RegisterInfo* GetRegInfo(CompilationUnit* cu, int reg) = 0;
-    virtual RegLocation GetReturnAlt(CompilationUnit* cu) = 0;
-    virtual RegLocation GetReturnWideAlt(CompilationUnit* cu) = 0;
-    virtual RegLocation LocCReturn() = 0;
-    virtual RegLocation LocCReturnDouble() = 0;
-    virtual RegLocation LocCReturnFloat() = 0;
-    virtual RegLocation LocCReturnWide() = 0;
-    virtual uint32_t FpRegMask() = 0;
-    virtual uint64_t GetRegMaskCommon(CompilationUnit* cu, int reg) = 0;
-    virtual void AdjustSpillMask(CompilationUnit* cu) = 0;
-    virtual void ClobberCalleeSave(CompilationUnit *cu) = 0;
-    virtual void FlushReg(CompilationUnit* cu, int reg) = 0;
-    virtual void FlushRegWide(CompilationUnit* cu, int reg1, int reg2) = 0;
-    virtual void FreeCallTemps(CompilationUnit* cu) = 0;
-    virtual void FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep, RegLocation rl_free) = 0;
-    virtual void LockCallTemps(CompilationUnit* cu) = 0;
-    virtual void MarkPreservedSingle(CompilationUnit* cu, int v_reg, int reg) = 0;
-    virtual void CompilerInitializeRegAlloc(CompilationUnit* cu) = 0;
-
-    // Required for target - miscellaneous.
-    virtual AssemblerStatus AssembleInstructions(CompilationUnit* cu, uintptr_t start_addr) = 0;
-    virtual void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix) = 0;
-    virtual void SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir) = 0;
-    virtual const char* GetTargetInstFmt(int opcode) = 0;
-    virtual const char* GetTargetInstName(int opcode) = 0;
-    virtual std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) = 0;
-    virtual uint64_t GetPCUseDefEncoding() = 0;
-    virtual uint64_t GetTargetInstFlags(int opcode) = 0;
-    virtual int GetInsnSize(LIR* lir) = 0;
-    virtual bool IsUnconditionalBranch(LIR* lir) = 0;
-
-    // Required for target - Dalvik-level generators.
-    virtual void GenArithImmOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
-                                   RegLocation rl_src1, RegLocation rl_src2) = 0;
-    virtual void GenMulLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) = 0;
-    virtual void GenAddLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) = 0;
-    virtual void GenAndLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) = 0;
-    virtual void GenArithOpDouble(CompilationUnit* cu, Instruction::Code opcode,
-                                  RegLocation rl_dest, RegLocation rl_src1,
-                                  RegLocation rl_src2) = 0;
-    virtual void GenArithOpFloat(CompilationUnit *cu, Instruction::Code opcode, RegLocation rl_dest,
-                                 RegLocation rl_src1, RegLocation rl_src2) = 0;
-    virtual void GenCmpFP(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
-                          RegLocation rl_src1, RegLocation rl_src2) = 0;
-    virtual void GenConversion(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
-                               RegLocation rl_src) = 0;
-    virtual bool GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier) = 0;
-    virtual bool GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min) = 0;
-    virtual bool GenInlinedSqrt(CompilationUnit* cu, CallInfo* info) = 0;
-    virtual void GenNegLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src) = 0;
-    virtual void GenOrLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
-                           RegLocation rl_src2) = 0;
-    virtual void GenSubLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) = 0;
-    virtual void GenXorLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) = 0;
-    virtual LIR* GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code, int reg1, int base,
-                                int offset, ThrowKind kind) = 0;
-    virtual RegLocation GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int reg_hi,
-                                  bool is_div) = 0;
-    virtual RegLocation GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int lit,
-                                     bool is_div) = 0;
-    virtual void GenCmpLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) = 0;
-    virtual void GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi) = 0;
-    virtual void GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs,
-                                  RegLocation rl_method) = 0;
-    virtual void GenExitSequence(CompilationUnit* cu) = 0;
-    virtual void GenFillArrayData(CompilationUnit* cu, uint32_t table_offset,
-                                  RegLocation rl_src) = 0;
-    virtual void GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir, bool gt_bias,
-                                     bool is_double) = 0;
-    virtual void GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir) = 0;
-    virtual void GenSelect(CompilationUnit* cu, BasicBlock* bb, MIR* mir) = 0;
-    virtual void GenMemBarrier(CompilationUnit* cu, MemBarrierKind barrier_kind) = 0;
-    virtual void GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src) = 0;
-    virtual void GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src) = 0;
-    virtual void GenMoveException(CompilationUnit* cu, RegLocation rl_dest) = 0;
-    virtual void GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
-                                               RegLocation rl_result, int lit, int first_bit,
-                                               int second_bit) = 0;
-    virtual void GenNegDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src) = 0;
-    virtual void GenNegFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src) = 0;
-    virtual void GenPackedSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
-                                 RegLocation rl_src) = 0;
-    virtual void GenSparseSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
-                                 RegLocation rl_src) = 0;
-    virtual void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
-                                SpecialCaseHandler special_case) = 0;
-    virtual void GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array,
-                                RegLocation rl_index, RegLocation rl_src, int scale) = 0;
-    virtual void GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
-                             RegLocation rl_index, RegLocation rl_dest, int scale) = 0;
-    virtual void GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
-                     RegLocation rl_index, RegLocation rl_src, int scale) = 0;
-    virtual void GenShiftImmOpLong(CompilationUnit* cu, Instruction::Code opcode,
-                                   RegLocation rl_dest, RegLocation rl_src1,
-                                   RegLocation rl_shift) = 0;
-
-    // Required for target - single operation generators.
-    virtual LIR* OpUnconditionalBranch(CompilationUnit* cu, LIR* target) = 0;
-    virtual LIR* OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1, int src2,
-                             LIR* target) = 0;
-    virtual LIR* OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg, int check_value,
-                                LIR* target) = 0;
-    virtual LIR* OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target) = 0;
-    virtual LIR* OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg,
-                                LIR* target) = 0;
-    virtual LIR* OpFpRegCopy(CompilationUnit* cu, int r_dest, int r_src) = 0;
-    virtual LIR* OpIT(CompilationUnit* cu, ConditionCode cond, const char* guide) = 0;
-    virtual LIR* OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp) = 0;
-    virtual LIR* OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target) = 0;
-    virtual LIR* OpReg(CompilationUnit* cu, OpKind op, int r_dest_src) = 0;
-    virtual LIR* OpRegCopy(CompilationUnit* cu, int r_dest, int r_src) = 0;
-    virtual LIR* OpRegCopyNoInsert(CompilationUnit* cu, int r_dest, int r_src) = 0;
-    virtual LIR* OpRegImm(CompilationUnit* cu, OpKind op, int r_dest_src1, int value) = 0;
-    virtual LIR* OpRegMem(CompilationUnit* cu, OpKind op, int r_dest, int rBase, int offset) = 0;
-    virtual LIR* OpRegReg(CompilationUnit* cu, OpKind op, int r_dest_src1, int r_src2) = 0;
-    virtual LIR* OpRegRegImm(CompilationUnit* cu, OpKind op, int r_dest, int r_src1, int value) = 0;
-    virtual LIR* OpRegRegReg(CompilationUnit* cu, OpKind op, int r_dest, int r_src1,
-                             int r_src2) = 0;
-    virtual LIR* OpTestSuspend(CompilationUnit* cu, LIR* target) = 0;
-    virtual LIR* OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset) = 0;
-    virtual LIR* OpVldm(CompilationUnit* cu, int rBase, int count) = 0;
-    virtual LIR* OpVstm(CompilationUnit* cu, int rBase, int count) = 0;
-    virtual void OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale,
-                       int offset) = 0;
-    virtual void OpRegCopyWide(CompilationUnit* cu, int dest_lo, int dest_hi, int src_lo,
-                               int src_hi) = 0;
-    virtual void OpTlsCmp(CompilationUnit* cu, int offset, int val) = 0;
-    virtual bool InexpensiveConstantInt(int32_t value) = 0;
-    virtual bool InexpensiveConstantFloat(int32_t value) = 0;
-    virtual bool InexpensiveConstantLong(int64_t value) = 0;
-    virtual bool InexpensiveConstantDouble(int64_t value) = 0;
-
-    // Temp workaround
-    void Workaround7250540(CompilationUnit* cu, RegLocation rl_dest, int value);
-    };  // Class Codegen
-
-}  // namespace art
-
-#endif // ART_SRC_COMPILER_DEX_QUICK_CODEGEN_H_
diff --git a/src/compiler/dex/quick/codegen_util.cc b/src/compiler/dex/quick/codegen_util.cc
index b5152df..91422ea 100644
--- a/src/compiler/dex/quick/codegen_util.cc
+++ b/src/compiler/dex/quick/codegen_util.cc
@@ -19,58 +19,54 @@
 #include "gc_map.h"
 #include "verifier/dex_gc_map.h"
 #include "verifier/method_verifier.h"
-#include "ralloc_util.h"
-#include "codegen_util.h"
 
 namespace art {
 
-bool IsInexpensiveConstant(CompilationUnit* cu, RegLocation rl_src)
+bool Mir2Lir::IsInexpensiveConstant(RegLocation rl_src)
 {
   bool res = false;
   if (rl_src.is_const) {
     if (rl_src.wide) {
       if (rl_src.fp) {
-         res = cu->cg->InexpensiveConstantDouble(cu->mir_graph->ConstantValueWide(rl_src));
+         res = InexpensiveConstantDouble(mir_graph_->ConstantValueWide(rl_src));
       } else {
-         res = cu->cg->InexpensiveConstantLong(cu->mir_graph->ConstantValueWide(rl_src));
+         res = InexpensiveConstantLong(mir_graph_->ConstantValueWide(rl_src));
       }
     } else {
       if (rl_src.fp) {
-         res = cu->cg->InexpensiveConstantFloat(cu->mir_graph->ConstantValue(rl_src));
+         res = InexpensiveConstantFloat(mir_graph_->ConstantValue(rl_src));
       } else {
-         res = cu->cg->InexpensiveConstantInt(cu->mir_graph->ConstantValue(rl_src));
+         res = InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src));
       }
     }
   }
   return res;
 }
 
-void MarkSafepointPC(CompilationUnit* cu, LIR* inst)
+void Mir2Lir::MarkSafepointPC(LIR* inst)
 {
   inst->def_mask = ENCODE_ALL;
-  LIR* safepoint_pc = NewLIR0(cu, kPseudoSafepointPC);
+  LIR* safepoint_pc = NewLIR0(kPseudoSafepointPC);
   DCHECK_EQ(safepoint_pc->def_mask, ENCODE_ALL);
 }
 
-bool FastInstance(CompilationUnit* cu,  uint32_t field_idx,
-                  int& field_offset, bool& is_volatile, bool is_put)
+bool Mir2Lir::FastInstance(uint32_t field_idx, int& field_offset, bool& is_volatile, bool is_put)
 {
-  return cu->compiler_driver->ComputeInstanceFieldInfo(
-      field_idx, cu->mir_graph->GetCurrentDexCompilationUnit(), field_offset, is_volatile, is_put);
+  return cu_->compiler_driver->ComputeInstanceFieldInfo(
+      field_idx, mir_graph_->GetCurrentDexCompilationUnit(), field_offset, is_volatile, is_put);
 }
 
 /* Convert an instruction to a NOP */
-void NopLIR( LIR* lir)
+void Mir2Lir::NopLIR(LIR* lir)
 {
   lir->flags.is_nop = true;
 }
 
-void SetMemRefType(CompilationUnit* cu, LIR* lir, bool is_load, int mem_type)
+void Mir2Lir::SetMemRefType(LIR* lir, bool is_load, int mem_type)
 {
   uint64_t *mask_ptr;
   uint64_t mask = ENCODE_MEM;
-  Codegen* cg = cu->cg.get();
-  DCHECK(cg->GetTargetInstFlags(lir->opcode) & (IS_LOAD | IS_STORE));
+  DCHECK(GetTargetInstFlags(lir->opcode) & (IS_LOAD | IS_STORE));
   if (is_load) {
     mask_ptr = &lir->use_mask;
   } else {
@@ -92,7 +88,7 @@
       break;
     case kMustNotAlias:
       /* Currently only loads can be marked as kMustNotAlias */
-      DCHECK(!(cg->GetTargetInstFlags(lir->opcode) & IS_STORE));
+      DCHECK(!(GetTargetInstFlags(lir->opcode) & IS_STORE));
       *mask_ptr |= ENCODE_MUST_NOT_ALIAS;
       break;
     default:
@@ -103,9 +99,10 @@
 /*
  * Mark load/store instructions that access Dalvik registers through the stack.
  */
-void AnnotateDalvikRegAccess(CompilationUnit* cu, LIR* lir, int reg_id, bool is_load, bool is64bit)
+void Mir2Lir::AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load,
+                                      bool is64bit)
 {
-  SetMemRefType(cu, lir, is_load, kDalvikReg);
+  SetMemRefType(lir, is_load, kDalvikReg);
 
   /*
    * Store the Dalvik register id in alias_info. Mark the MSB if it is a 64-bit
@@ -117,38 +114,36 @@
 /*
  * Mark the corresponding bit(s).
  */
-void SetupRegMask(CompilationUnit* cu, uint64_t* mask, int reg)
+void Mir2Lir::SetupRegMask(uint64_t* mask, int reg)
 {
-  Codegen* cg = cu->cg.get();
-  *mask |= cg->GetRegMaskCommon(cu, reg);
+  *mask |= GetRegMaskCommon(reg);
 }
 
 /*
  * Set up the proper fields in the resource mask
  */
-void SetupResourceMasks(CompilationUnit* cu, LIR* lir)
+void Mir2Lir::SetupResourceMasks(LIR* lir)
 {
   int opcode = lir->opcode;
-  Codegen* cg = cu->cg.get();
 
   if (opcode <= 0) {
     lir->use_mask = lir->def_mask = 0;
     return;
   }
 
-  uint64_t flags = cg->GetTargetInstFlags(opcode);
+  uint64_t flags = GetTargetInstFlags(opcode);
 
   if (flags & NEEDS_FIXUP) {
     lir->flags.pcRelFixup = true;
   }
 
   /* Get the starting size of the instruction's template */
-  lir->flags.size = cg->GetInsnSize(lir);
+  lir->flags.size = GetInsnSize(lir);
 
   /* Set up the mask for resources that are updated */
   if (flags & (IS_LOAD | IS_STORE)) {
     /* Default to heap - will catch specialized classes later */
-    SetMemRefType(cu, lir, flags & IS_LOAD, kHeapRef);
+    SetMemRefType(lir, flags & IS_LOAD, kHeapRef);
   }
 
   /*
@@ -161,11 +156,11 @@
   }
 
   if (flags & REG_DEF0) {
-    SetupRegMask(cu, &lir->def_mask, lir->operands[0]);
+    SetupRegMask(&lir->def_mask, lir->operands[0]);
   }
 
   if (flags & REG_DEF1) {
-    SetupRegMask(cu, &lir->def_mask, lir->operands[1]);
+    SetupRegMask(&lir->def_mask, lir->operands[1]);
   }
 
 
@@ -178,7 +173,7 @@
 
     for (i = 0; i < 4; i++) {
       if (flags & (1 << (kRegUse0 + i))) {
-        SetupRegMask(cu, &lir->use_mask, lir->operands[i]);
+        SetupRegMask(&lir->use_mask, lir->operands[i]);
       }
     }
   }
@@ -188,7 +183,7 @@
   }
 
   // Handle target-specific actions
-  cg->SetupTargetResourceMasks(cu, lir);
+  SetupTargetResourceMasks(lir);
 }
 
 /*
@@ -197,18 +192,17 @@
 #define DUMP_RESOURCE_MASK(X)
 
 /* Pretty-print a LIR instruction */
-void DumpLIRInsn(CompilationUnit* cu, LIR* lir, unsigned char* base_addr)
+void Mir2Lir::DumpLIRInsn(LIR* lir, unsigned char* base_addr)
 {
   int offset = lir->offset;
   int dest = lir->operands[0];
-  const bool dump_nop = (cu->enable_debug & (1 << kDebugShowNops));
-  Codegen* cg = cu->cg.get();
+  const bool dump_nop = (cu_->enable_debug & (1 << kDebugShowNops));
 
   /* Handle pseudo-ops individually, and all regular insns as a group */
   switch (lir->opcode) {
     case kPseudoMethodEntry:
       LOG(INFO) << "-------- method entry "
-                << PrettyMethod(cu->method_idx, *cu->dex_file);
+                << PrettyMethod(cu_->method_idx, *cu_->dex_file);
       break;
     case kPseudoMethodExit:
       LOG(INFO) << "-------- Method_Exit";
@@ -264,9 +258,9 @@
       if (lir->flags.is_nop && !dump_nop) {
         break;
       } else {
-        std::string op_name(cg->BuildInsnString(cg->GetTargetInstName(lir->opcode),
+        std::string op_name(BuildInsnString(GetTargetInstName(lir->opcode),
                                                lir, base_addr));
-        std::string op_operands(cg->BuildInsnString(cg->GetTargetInstFmt(lir->opcode),
+        std::string op_operands(BuildInsnString(GetTargetInstFmt(lir->opcode),
                                                     lir, base_addr));
         LOG(INFO) << StringPrintf("%05x: %-9s%s%s",
                                   reinterpret_cast<unsigned int>(base_addr + offset),
@@ -284,38 +278,37 @@
   }
 }
 
-void DumpPromotionMap(CompilationUnit *cu)
+void Mir2Lir::DumpPromotionMap()
 {
-  Codegen* cg = cu->cg.get();
-  int num_regs = cu->num_dalvik_registers + cu->num_compiler_temps + 1;
+  int num_regs = cu_->num_dalvik_registers + cu_->num_compiler_temps + 1;
   for (int i = 0; i < num_regs; i++) {
-    PromotionMap v_reg_map = cu->promotion_map[i];
+    PromotionMap v_reg_map = promotion_map_[i];
     std::string buf;
     if (v_reg_map.fp_location == kLocPhysReg) {
-      StringAppendF(&buf, " : s%d", v_reg_map.FpReg & cg->FpRegMask());
+      StringAppendF(&buf, " : s%d", v_reg_map.FpReg & FpRegMask());
     }
 
     std::string buf3;
-    if (i < cu->num_dalvik_registers) {
+    if (i < cu_->num_dalvik_registers) {
       StringAppendF(&buf3, "%02d", i);
-    } else if (i == cu->method_sreg) {
+    } else if (i == mir_graph_->GetMethodSReg()) {
       buf3 = "Method*";
     } else {
-      StringAppendF(&buf3, "ct%d", i - cu->num_dalvik_registers);
+      StringAppendF(&buf3, "ct%d", i - cu_->num_dalvik_registers);
     }
 
     LOG(INFO) << StringPrintf("V[%s] -> %s%d%s", buf3.c_str(),
                               v_reg_map.core_location == kLocPhysReg ?
                               "r" : "SP+", v_reg_map.core_location == kLocPhysReg ?
-                              v_reg_map.core_reg : SRegOffset(cu, i),
+                              v_reg_map.core_reg : SRegOffset(i),
                               buf.c_str());
   }
 }
 
 /* Dump a mapping table */
-static void DumpMappingTable(const char* table_name, const std::string& descriptor,
-                             const std::string& name, const std::string& signature,
-                             const std::vector<uint32_t>& v) {
+void Mir2Lir::DumpMappingTable(const char* table_name, const std::string& descriptor,
+                               const std::string& name, const std::string& signature,
+                               const std::vector<uint32_t>& v) {
   if (v.size() > 0) {
     std::string line(StringPrintf("\n  %s %s%s_%s_table[%zu] = {", table_name,
                      descriptor.c_str(), name.c_str(), signature.c_str(), v.size()));
@@ -330,49 +323,49 @@
 }
 
 /* Dump instructions and constant pool contents */
-void CodegenDump(CompilationUnit* cu)
+void Mir2Lir::CodegenDump()
 {
   LOG(INFO) << "Dumping LIR insns for "
-            << PrettyMethod(cu->method_idx, *cu->dex_file);
+            << PrettyMethod(cu_->method_idx, *cu_->dex_file);
   LIR* lir_insn;
-  int insns_size = cu->code_item->insns_size_in_code_units_;
+  int insns_size = cu_->code_item->insns_size_in_code_units_;
 
-  LOG(INFO) << "Regs (excluding ins) : " << cu->num_regs;
-  LOG(INFO) << "Ins          : " << cu->num_ins;
-  LOG(INFO) << "Outs         : " << cu->num_outs;
-  LOG(INFO) << "CoreSpills       : " << cu->num_core_spills;
-  LOG(INFO) << "FPSpills       : " << cu->num_fp_spills;
-  LOG(INFO) << "CompilerTemps    : " << cu->num_compiler_temps;
-  LOG(INFO) << "Frame size       : " << cu->frame_size;
-  LOG(INFO) << "code size is " << cu->total_size <<
+  LOG(INFO) << "Regs (excluding ins) : " << cu_->num_regs;
+  LOG(INFO) << "Ins          : " << cu_->num_ins;
+  LOG(INFO) << "Outs         : " << cu_->num_outs;
+  LOG(INFO) << "CoreSpills       : " << num_core_spills_;
+  LOG(INFO) << "FPSpills       : " << num_fp_spills_;
+  LOG(INFO) << "CompilerTemps    : " << cu_->num_compiler_temps;
+  LOG(INFO) << "Frame size       : " << frame_size_;
+  LOG(INFO) << "code size is " << total_size_ <<
     " bytes, Dalvik size is " << insns_size * 2;
   LOG(INFO) << "expansion factor: "
-            << static_cast<float>(cu->total_size) / static_cast<float>(insns_size * 2);
-  DumpPromotionMap(cu);
-  for (lir_insn = cu->first_lir_insn; lir_insn != NULL; lir_insn = lir_insn->next) {
-    DumpLIRInsn(cu, lir_insn, 0);
+            << static_cast<float>(total_size_) / static_cast<float>(insns_size * 2);
+  DumpPromotionMap();
+  for (lir_insn = first_lir_insn_; lir_insn != NULL; lir_insn = lir_insn->next) {
+    DumpLIRInsn(lir_insn, 0);
   }
-  for (lir_insn = cu->literal_list; lir_insn != NULL; lir_insn = lir_insn->next) {
+  for (lir_insn = literal_list_; lir_insn != NULL; lir_insn = lir_insn->next) {
     LOG(INFO) << StringPrintf("%x (%04x): .word (%#x)", lir_insn->offset, lir_insn->offset,
                               lir_insn->operands[0]);
   }
 
   const DexFile::MethodId& method_id =
-      cu->dex_file->GetMethodId(cu->method_idx);
-  std::string signature(cu->dex_file->GetMethodSignature(method_id));
-  std::string name(cu->dex_file->GetMethodName(method_id));
-  std::string descriptor(cu->dex_file->GetMethodDeclaringClassDescriptor(method_id));
+      cu_->dex_file->GetMethodId(cu_->method_idx);
+  std::string signature(cu_->dex_file->GetMethodSignature(method_id));
+  std::string name(cu_->dex_file->GetMethodName(method_id));
+  std::string descriptor(cu_->dex_file->GetMethodDeclaringClassDescriptor(method_id));
 
   // Dump mapping tables
-  DumpMappingTable("PC2Dex_MappingTable", descriptor, name, signature, cu->pc2dexMappingTable);
-  DumpMappingTable("Dex2PC_MappingTable", descriptor, name, signature, cu->dex2pcMappingTable);
+  DumpMappingTable("PC2Dex_MappingTable", descriptor, name, signature, pc2dex_mapping_table_);
+  DumpMappingTable("Dex2PC_MappingTable", descriptor, name, signature, dex2pc_mapping_table_);
 }
 
 
-LIR* RawLIR(CompilationUnit* cu, int dalvik_offset, int opcode, int op0,
-      int op1, int op2, int op3, int op4, LIR* target)
+LIR* Mir2Lir::RawLIR(int dalvik_offset, int opcode, int op0,
+                     int op1, int op2, int op3, int op4, LIR* target)
 {
-  LIR* insn = static_cast<LIR*>(NewMem(cu, sizeof(LIR), true, kAllocLIR));
+  LIR* insn = static_cast<LIR*>(NewMem(cu_, sizeof(LIR), true, kAllocLIR));
   insn->dalvik_offset = dalvik_offset;
   insn->opcode = opcode;
   insn->operands[0] = op0;
@@ -381,7 +374,7 @@
   insn->operands[3] = op3;
   insn->operands[4] = op4;
   insn->target = target;
-  SetupResourceMasks(cu, insn);
+  SetupResourceMasks(insn);
   if ((opcode == kPseudoTargetLabel) || (opcode == kPseudoSafepointPC) ||
       (opcode == kPseudoExportedPC)) {
     // Always make labels scheduling barriers
@@ -394,80 +387,70 @@
  * The following are building blocks to construct low-level IRs with 0 - 4
  * operands.
  */
-LIR* NewLIR0(CompilationUnit* cu, int opcode)
+LIR* Mir2Lir::NewLIR0(int opcode)
 {
-  Codegen* cg = cu->cg.get();
-  DCHECK(is_pseudo_opcode(opcode) || (cg->GetTargetInstFlags(opcode) & NO_OPERAND))
-      << cg->GetTargetInstName(opcode) << " " << opcode << " "
-      << PrettyMethod(cu->method_idx, *cu->dex_file) << " "
-      << cu->current_dalvik_offset;
-  LIR* insn = RawLIR(cu, cu->current_dalvik_offset, opcode);
-  AppendLIR(cu, insn);
+  DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & NO_OPERAND))
+      << GetTargetInstName(opcode) << " " << opcode << " "
+      << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
+      << current_dalvik_offset_;
+  LIR* insn = RawLIR(current_dalvik_offset_, opcode);
+  AppendLIR(insn);
   return insn;
 }
 
-LIR* NewLIR1(CompilationUnit* cu, int opcode,
-               int dest)
+LIR* Mir2Lir::NewLIR1(int opcode, int dest)
 {
-  Codegen* cg = cu->cg.get();
-  DCHECK(is_pseudo_opcode(opcode) || (cg->GetTargetInstFlags(opcode) & IS_UNARY_OP))
-      << cg->GetTargetInstName(opcode) << " " << opcode << " "
-      << PrettyMethod(cu->method_idx, *cu->dex_file) << " "
-      << cu->current_dalvik_offset;
-  LIR* insn = RawLIR(cu, cu->current_dalvik_offset, opcode, dest);
-  AppendLIR(cu, insn);
+  DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_UNARY_OP))
+      << GetTargetInstName(opcode) << " " << opcode << " "
+      << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
+      << current_dalvik_offset_;
+  LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest);
+  AppendLIR(insn);
   return insn;
 }
 
-LIR* NewLIR2(CompilationUnit* cu, int opcode,
-               int dest, int src1)
+LIR* Mir2Lir::NewLIR2(int opcode, int dest, int src1)
 {
-  Codegen* cg = cu->cg.get();
-  DCHECK(is_pseudo_opcode(opcode) || (cg->GetTargetInstFlags(opcode) & IS_BINARY_OP))
-      << cg->GetTargetInstName(opcode) << " " << opcode << " "
-      << PrettyMethod(cu->method_idx, *cu->dex_file) << " "
-      << cu->current_dalvik_offset;
-  LIR* insn = RawLIR(cu, cu->current_dalvik_offset, opcode, dest, src1);
-  AppendLIR(cu, insn);
+  DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_BINARY_OP))
+      << GetTargetInstName(opcode) << " " << opcode << " "
+      << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
+      << current_dalvik_offset_;
+  LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest, src1);
+  AppendLIR(insn);
   return insn;
 }
 
-LIR* NewLIR3(CompilationUnit* cu, int opcode,
-               int dest, int src1, int src2)
+LIR* Mir2Lir::NewLIR3(int opcode, int dest, int src1, int src2)
 {
-  Codegen* cg = cu->cg.get();
-  DCHECK(is_pseudo_opcode(opcode) || (cg->GetTargetInstFlags(opcode) & IS_TERTIARY_OP))
-      << cg->GetTargetInstName(opcode) << " " << opcode << " "
-      << PrettyMethod(cu->method_idx, *cu->dex_file) << " "
-      << cu->current_dalvik_offset;
-  LIR* insn = RawLIR(cu, cu->current_dalvik_offset, opcode, dest, src1, src2);
-  AppendLIR(cu, insn);
+  DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_TERTIARY_OP))
+      << GetTargetInstName(opcode) << " " << opcode << " "
+      << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
+      << current_dalvik_offset_;
+  LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest, src1, src2);
+  AppendLIR(insn);
   return insn;
 }
 
-LIR* NewLIR4(CompilationUnit* cu, int opcode,
-      int dest, int src1, int src2, int info)
+LIR* Mir2Lir::NewLIR4(int opcode, int dest, int src1, int src2, int info)
 {
-  Codegen* cg = cu->cg.get();
-  DCHECK(is_pseudo_opcode(opcode) || (cg->GetTargetInstFlags(opcode) & IS_QUAD_OP))
-      << cg->GetTargetInstName(opcode) << " " << opcode << " "
-      << PrettyMethod(cu->method_idx, *cu->dex_file) << " "
-      << cu->current_dalvik_offset;
-  LIR* insn = RawLIR(cu, cu->current_dalvik_offset, opcode, dest, src1, src2, info);
-  AppendLIR(cu, insn);
+  DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_QUAD_OP))
+      << GetTargetInstName(opcode) << " " << opcode << " "
+      << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
+      << current_dalvik_offset_;
+  LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest, src1, src2, info);
+  AppendLIR(insn);
   return insn;
 }
 
-LIR* NewLIR5(CompilationUnit* cu, int opcode,
-       int dest, int src1, int src2, int info1, int info2)
+LIR* Mir2Lir::NewLIR5(int opcode, int dest, int src1, int src2, int info1,
+                      int info2)
 {
-  Codegen* cg = cu->cg.get();
-  DCHECK(is_pseudo_opcode(opcode) || (cg->GetTargetInstFlags(opcode) & IS_QUIN_OP))
-      << cg->GetTargetInstName(opcode) << " " << opcode << " "
-      << PrettyMethod(cu->method_idx, *cu->dex_file) << " "
-      << cu->current_dalvik_offset;
-  LIR* insn = RawLIR(cu, cu->current_dalvik_offset, opcode, dest, src1, src2, info1, info2);
-  AppendLIR(cu, insn);
+  DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_QUIN_OP))
+      << GetTargetInstName(opcode) << " " << opcode << " "
+      << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
+      << current_dalvik_offset_;
+  LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest, src1, src2, info1, info2);
+  AppendLIR(insn);
   return insn;
 }
 
@@ -475,7 +458,7 @@
  * Search the existing constants in the literal pool for an exact or close match
  * within a specified delta (greater than or equal to 0).
  */
-LIR* ScanLiteralPool(LIR* data_target, int value, unsigned int delta)
+LIR* Mir2Lir::ScanLiteralPool(LIR* data_target, int value, unsigned int delta)
 {
   while (data_target) {
     if ((static_cast<unsigned>(value - data_target->operands[0])) <= delta)
@@ -486,7 +469,7 @@
 }
 
 /* Search the existing constants in the literal pool for an exact wide match */
-LIR* ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi)
+LIR* Mir2Lir::ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi)
 {
   bool lo_match = false;
   LIR* lo_target = NULL;
@@ -512,11 +495,11 @@
  */
 
 /* Add a 32-bit constant to the constant pool */
-LIR* AddWordData(CompilationUnit* cu, LIR* *constant_list_p, int value)
+LIR* Mir2Lir::AddWordData(LIR* *constant_list_p, int value)
 {
   /* Add the constant to the literal pool */
   if (constant_list_p) {
-    LIR* new_value = static_cast<LIR*>(NewMem(cu, sizeof(LIR), true, kAllocData));
+    LIR* new_value = static_cast<LIR*>(NewMem(cu_, sizeof(LIR), true, kAllocData));
     new_value->operands[0] = value;
     new_value->next = *constant_list_p;
     *constant_list_p = new_value;
@@ -526,11 +509,10 @@
 }
 
 /* Add a 64-bit constant to the constant pool or mixed with code */
-LIR* AddWideData(CompilationUnit* cu, LIR* *constant_list_p,
-               int val_lo, int val_hi)
+LIR* Mir2Lir::AddWideData(LIR* *constant_list_p, int val_lo, int val_hi)
 {
-  AddWordData(cu, constant_list_p, val_hi);
-  return AddWordData(cu, constant_list_p, val_lo);
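+  // AddWordData prepends to the list, so adding the high word first leaves the low word at the head, emitted first.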
+  AddWordData(constant_list_p, val_hi);
+  return AddWordData(constant_list_p, val_lo);
 }
 
 static void PushWord(std::vector<uint8_t>& buf, int data) {
@@ -547,57 +529,57 @@
 }
 
 /* Write the literal pool to the output stream */
-static void InstallLiteralPools(CompilationUnit* cu)
+void Mir2Lir::InstallLiteralPools()
 {
-  AlignBuffer(cu->code_buffer, cu->data_offset);
-  LIR* data_lir = cu->literal_list;
+  AlignBuffer(code_buffer_, data_offset_);
+  LIR* data_lir = literal_list_;
   while (data_lir != NULL) {
-    PushWord(cu->code_buffer, data_lir->operands[0]);
+    PushWord(code_buffer_, data_lir->operands[0]);
     data_lir = NEXT_LIR(data_lir);
   }
   // Push code and method literals, record offsets for the compiler to patch.
-  data_lir = cu->code_literal_list;
+  data_lir = code_literal_list_;
   while (data_lir != NULL) {
     uint32_t target = data_lir->operands[0];
-    cu->compiler_driver->AddCodePatch(cu->dex_file,
-                                      cu->method_idx,
-                                      cu->invoke_type,
+    cu_->compiler_driver->AddCodePatch(cu_->dex_file,
+                                      cu_->method_idx,
+                                      cu_->invoke_type,
                                       target,
                                       static_cast<InvokeType>(data_lir->operands[1]),
-                                      cu->code_buffer.size());
-    const DexFile::MethodId& id = cu->dex_file->GetMethodId(target);
+                                      code_buffer_.size());
+    const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target);
     // unique based on target to ensure code deduplication works
     uint32_t unique_patch_value = reinterpret_cast<uint32_t>(&id);
-    PushWord(cu->code_buffer, unique_patch_value);
+    PushWord(code_buffer_, unique_patch_value);
     data_lir = NEXT_LIR(data_lir);
   }
-  data_lir = cu->method_literal_list;
+  data_lir = method_literal_list_;
   while (data_lir != NULL) {
     uint32_t target = data_lir->operands[0];
-    cu->compiler_driver->AddMethodPatch(cu->dex_file,
-                                        cu->method_idx,
-                                        cu->invoke_type,
+    cu_->compiler_driver->AddMethodPatch(cu_->dex_file,
+                                        cu_->method_idx,
+                                        cu_->invoke_type,
                                         target,
                                         static_cast<InvokeType>(data_lir->operands[1]),
-                                        cu->code_buffer.size());
-    const DexFile::MethodId& id = cu->dex_file->GetMethodId(target);
+                                        code_buffer_.size());
+    const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target);
     // unique based on target to ensure code deduplication works
     uint32_t unique_patch_value = reinterpret_cast<uint32_t>(&id);
-    PushWord(cu->code_buffer, unique_patch_value);
+    PushWord(code_buffer_, unique_patch_value);
     data_lir = NEXT_LIR(data_lir);
   }
 }
 
 /* Write the switch tables to the output stream */
-static void InstallSwitchTables(CompilationUnit* cu)
+void Mir2Lir::InstallSwitchTables()
 {
   GrowableListIterator iterator;
-  GrowableListIteratorInit(&cu->switch_tables, &iterator);
+  GrowableListIteratorInit(&switch_tables_, &iterator);
   while (true) {
-    Codegen::SwitchTable* tab_rec =
-      reinterpret_cast<Codegen::SwitchTable*>(GrowableListIteratorNext( &iterator));
+    Mir2Lir::SwitchTable* tab_rec =
+      reinterpret_cast<Mir2Lir::SwitchTable*>(GrowableListIteratorNext(&iterator));
     if (tab_rec == NULL) break;
-    AlignBuffer(cu->code_buffer, tab_rec->offset);
+    AlignBuffer(code_buffer_, tab_rec->offset);
     /*
      * For Arm, our reference point is the address of the bx
      * instruction that does the launch, so we have to subtract
@@ -605,7 +587,7 @@
      * is a label, so we can use the offset as-is.
      */
     int bx_offset = INVALID_OFFSET;
-    switch (cu->instruction_set) {
+    switch (cu_->instruction_set) {
       case kThumb2:
         bx_offset = tab_rec->anchor->offset + 4;
         break;
@@ -615,22 +597,22 @@
       case kMips:
         bx_offset = tab_rec->anchor->offset;
         break;
-      default: LOG(FATAL) << "Unexpected instruction set: " << cu->instruction_set;
+      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
     }
-    if (cu->verbose) {
+    if (cu_->verbose) {
       LOG(INFO) << "Switch table for offset 0x" << std::hex << bx_offset;
     }
     if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
       const int* keys = reinterpret_cast<const int*>(&(tab_rec->table[2]));
       for (int elems = 0; elems < tab_rec->table[1]; elems++) {
         int disp = tab_rec->targets[elems]->offset - bx_offset;
-        if (cu->verbose) {
+        if (cu_->verbose) {
           LOG(INFO) << "  Case[" << elems << "] key: 0x"
                     << std::hex << keys[elems] << ", disp: 0x"
                     << std::hex << disp;
         }
-        PushWord(cu->code_buffer, keys[elems]);
-        PushWord(cu->code_buffer,
+        PushWord(code_buffer_, keys[elems]);
+        PushWord(code_buffer_,
           tab_rec->targets[elems]->offset - bx_offset);
       }
     } else {
@@ -638,29 +620,29 @@
                 static_cast<int>(Instruction::kPackedSwitchSignature));
       for (int elems = 0; elems < tab_rec->table[1]; elems++) {
         int disp = tab_rec->targets[elems]->offset - bx_offset;
-        if (cu->verbose) {
+        if (cu_->verbose) {
           LOG(INFO) << "  Case[" << elems << "] disp: 0x"
                     << std::hex << disp;
         }
-        PushWord(cu->code_buffer, tab_rec->targets[elems]->offset - bx_offset);
+        PushWord(code_buffer_, tab_rec->targets[elems]->offset - bx_offset);
       }
     }
   }
 }
 
 /* Write the fill array data to the output stream */
-static void InstallFillArrayData(CompilationUnit* cu)
+void Mir2Lir::InstallFillArrayData()
 {
   GrowableListIterator iterator;
-  GrowableListIteratorInit(&cu->fill_array_data, &iterator);
+  GrowableListIteratorInit(&fill_array_data_, &iterator);
   while (true) {
-    Codegen::FillArrayData *tab_rec =
-        reinterpret_cast<Codegen::FillArrayData*>(GrowableListIteratorNext( &iterator));
+    Mir2Lir::FillArrayData *tab_rec =
+        reinterpret_cast<Mir2Lir::FillArrayData*>(GrowableListIteratorNext(&iterator));
     if (tab_rec == NULL) break;
-    AlignBuffer(cu->code_buffer, tab_rec->offset);
+    AlignBuffer(code_buffer_, tab_rec->offset);
     for (int i = 0; i < (tab_rec->size + 1) / 2; i++) {
-      cu->code_buffer.push_back( tab_rec->table[i] & 0xFF);
-      cu->code_buffer.push_back( (tab_rec->table[i] >> 8) & 0xFF);
+      code_buffer_.push_back(tab_rec->table[i] & 0xFF);
+      code_buffer_.push_back((tab_rec->table[i] >> 8) & 0xFF);
     }
   }
 }
@@ -675,15 +657,15 @@
 }
 
 // Make sure we have a code address for every declared catch entry
-static bool VerifyCatchEntries(CompilationUnit* cu)
+bool Mir2Lir::VerifyCatchEntries()
 {
   bool success = true;
-  for (std::set<uint32_t>::const_iterator it = cu->mir_graph->catches_.begin();
-       it != cu->mir_graph->catches_.end(); ++it) {
+  for (std::set<uint32_t>::const_iterator it = mir_graph_->catches_.begin();
+       it != mir_graph_->catches_.end(); ++it) {
     uint32_t dex_pc = *it;
     bool found = false;
-    for (size_t i = 0; i < cu->dex2pcMappingTable.size(); i += 2) {
-      if (dex_pc == cu->dex2pcMappingTable[i+1]) {
+    for (size_t i = 0; i < dex2pc_mapping_table_.size(); i += 2) {
+      if (dex_pc == dex2pc_mapping_table_[i+1]) {
         found = true;
         break;
       }
@@ -694,46 +676,44 @@
     }
   }
   // Now, try in the other direction
-  for (size_t i = 0; i < cu->dex2pcMappingTable.size(); i += 2) {
-    uint32_t dex_pc = cu->dex2pcMappingTable[i+1];
-    if (cu->mir_graph->catches_.find(dex_pc) == cu->mir_graph->catches_.end()) {
+  for (size_t i = 0; i < dex2pc_mapping_table_.size(); i += 2) {
+    uint32_t dex_pc = dex2pc_mapping_table_[i+1];
+    if (mir_graph_->catches_.find(dex_pc) == mir_graph_->catches_.end()) {
       LOG(INFO) << "Unexpected catch entry @ dex pc 0x" << std::hex << dex_pc;
       success = false;
     }
   }
   if (!success) {
-    LOG(INFO) << "Bad dex2pcMapping table in " << PrettyMethod(cu->method_idx, *cu->dex_file);
-    LOG(INFO) << "Entries @ decode: " << cu->mir_graph->catches_.size() << ", Entries in table: "
-              << cu->dex2pcMappingTable.size()/2;
+    LOG(INFO) << "Bad dex2pcMapping table in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
+    LOG(INFO) << "Entries @ decode: " << mir_graph_->catches_.size() << ", Entries in table: "
+              << dex2pc_mapping_table_.size()/2;
   }
   return success;
 }
 
 
-static void CreateMappingTables(CompilationUnit* cu)
+void Mir2Lir::CreateMappingTables()
 {
-  for (LIR* tgt_lir = cu->first_lir_insn; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
+  for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
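+    // Safepoint PCs feed the pc2dex table; exported PCs feed the dex2pc table.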
     if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
-      cu->pc2dexMappingTable.push_back(tgt_lir->offset);
-      cu->pc2dexMappingTable.push_back(tgt_lir->dalvik_offset);
+      pc2dex_mapping_table_.push_back(tgt_lir->offset);
+      pc2dex_mapping_table_.push_back(tgt_lir->dalvik_offset);
     }
     if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) {
-      cu->dex2pcMappingTable.push_back(tgt_lir->offset);
-      cu->dex2pcMappingTable.push_back(tgt_lir->dalvik_offset);
+      dex2pc_mapping_table_.push_back(tgt_lir->offset);
+      dex2pc_mapping_table_.push_back(tgt_lir->dalvik_offset);
     }
   }
   if (kIsDebugBuild) {
-    DCHECK(VerifyCatchEntries(cu));
+    DCHECK(VerifyCatchEntries());
   }
-  cu->combined_mapping_table.push_back(cu->pc2dexMappingTable.size() +
-                                        cu->dex2pcMappingTable.size());
-  cu->combined_mapping_table.push_back(cu->pc2dexMappingTable.size());
-  cu->combined_mapping_table.insert(cu->combined_mapping_table.end(),
-                                     cu->pc2dexMappingTable.begin(),
-                                     cu->pc2dexMappingTable.end());
-  cu->combined_mapping_table.insert(cu->combined_mapping_table.end(),
-                                     cu->dex2pcMappingTable.begin(),
-                                     cu->dex2pcMappingTable.end());
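+  // Combined table layout: [total size, pc2dex size, pc2dex entries..., dex2pc entries...].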
+  combined_mapping_table_.push_back(pc2dex_mapping_table_.size() +
+                                        dex2pc_mapping_table_.size());
+  combined_mapping_table_.push_back(pc2dex_mapping_table_.size());
+  combined_mapping_table_.insert(combined_mapping_table_.end(), pc2dex_mapping_table_.begin(),
+                                 pc2dex_mapping_table_.end());
+  combined_mapping_table_.insert(combined_mapping_table_.end(), dex2pc_mapping_table_.begin(),
+                                 dex2pc_mapping_table_.end());
 }
 
 class NativePcToReferenceMapBuilder {
@@ -814,8 +794,8 @@
   std::vector<uint8_t>* const table_;
 };
 
-static void CreateNativeGcMap(CompilationUnit* cu) {
-  const std::vector<uint32_t>& mapping_table = cu->pc2dexMappingTable;
+void Mir2Lir::CreateNativeGcMap() {
+  const std::vector<uint32_t>& mapping_table = pc2dex_mapping_table_;
   uint32_t max_native_offset = 0;
   for (size_t i = 0; i < mapping_table.size(); i += 2) {
     uint32_t native_offset = mapping_table[i + 0];
@@ -823,11 +803,11 @@
       max_native_offset = native_offset;
     }
   }
-  CompilerDriver::MethodReference method_ref(cu->dex_file, cu->method_idx);
+  CompilerDriver::MethodReference method_ref(cu_->dex_file, cu_->method_idx);
   const std::vector<uint8_t>* gc_map_raw = verifier::MethodVerifier::GetDexGcMap(method_ref);
   verifier::DexPcToReferenceMap dex_gc_map(&(*gc_map_raw)[4], gc_map_raw->size() - 4);
   // Compute native offset to references size.
-  NativePcToReferenceMapBuilder native_gc_map_builder(&cu->native_gc_map,
+  NativePcToReferenceMapBuilder native_gc_map_builder(&native_gc_map_,
                                                       mapping_table.size() / 2, max_native_offset,
                                                       dex_gc_map.RegWidth());
 
@@ -841,21 +821,21 @@
 }
 
 /* Determine the offset of each literal field */
-static int AssignLiteralOffset(CompilationUnit* cu, int offset)
+int Mir2Lir::AssignLiteralOffset(int offset)
 {
-  offset = AssignLiteralOffsetCommon(cu->literal_list, offset);
-  offset = AssignLiteralOffsetCommon(cu->code_literal_list, offset);
-  offset = AssignLiteralOffsetCommon(cu->method_literal_list, offset);
+  offset = AssignLiteralOffsetCommon(literal_list_, offset);
+  offset = AssignLiteralOffsetCommon(code_literal_list_, offset);
+  offset = AssignLiteralOffsetCommon(method_literal_list_, offset);
   return offset;
 }
 
-static int AssignSwitchTablesOffset(CompilationUnit* cu, int offset)
+int Mir2Lir::AssignSwitchTablesOffset(int offset)
 {
   GrowableListIterator iterator;
-  GrowableListIteratorInit(&cu->switch_tables, &iterator);
+  GrowableListIteratorInit(&switch_tables_, &iterator);
   while (true) {
-    Codegen::SwitchTable *tab_rec =
-        reinterpret_cast<Codegen::SwitchTable*>(GrowableListIteratorNext(&iterator));
+    Mir2Lir::SwitchTable *tab_rec =
+        reinterpret_cast<Mir2Lir::SwitchTable*>(GrowableListIteratorNext(&iterator));
     if (tab_rec == NULL) break;
     tab_rec->offset = offset;
     if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
@@ -869,13 +849,13 @@
   return offset;
 }
 
-static int AssignFillArrayDataOffset(CompilationUnit* cu, int offset)
+int Mir2Lir::AssignFillArrayDataOffset(int offset)
 {
   GrowableListIterator iterator;
-  GrowableListIteratorInit(&cu->fill_array_data, &iterator);
+  GrowableListIteratorInit(&fill_array_data_, &iterator);
   while (true) {
-    Codegen::FillArrayData *tab_rec =
-        reinterpret_cast<Codegen::FillArrayData*>(GrowableListIteratorNext(&iterator));
+    Mir2Lir::FillArrayData *tab_rec =
+        reinterpret_cast<Mir2Lir::FillArrayData*>(GrowableListIteratorNext(&iterator));
     if (tab_rec == NULL) break;
     tab_rec->offset = offset;
     offset += tab_rec->size;
@@ -886,12 +866,12 @@
 }
 
 // LIR offset assignment.
-static int AssignInsnOffsets(CompilationUnit* cu)
+int Mir2Lir::AssignInsnOffsets()
 {
   LIR* lir;
   int offset = 0;
 
-  for (lir = cu->first_lir_insn; lir != NULL; lir = NEXT_LIR(lir)) {
+  for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
     lir->offset = offset;
     if (lir->opcode >= 0) {
       if (!lir->flags.is_nop) {
@@ -915,23 +895,23 @@
  * Walk the compilation unit and assign offsets to instructions
  * and literals and compute the total size of the compiled unit.
  */
-static void AssignOffsets(CompilationUnit* cu)
+void Mir2Lir::AssignOffsets()
 {
-  int offset = AssignInsnOffsets(cu);
+  int offset = AssignInsnOffsets();
 
   /* Const values have to be word aligned */
   offset = (offset + 3) & ~3;
 
   /* Set up offsets for literals */
-  cu->data_offset = offset;
+  data_offset_ = offset;
 
-  offset = AssignLiteralOffset(cu, offset);
+  offset = AssignLiteralOffset(offset);
 
-  offset = AssignSwitchTablesOffset(cu, offset);
+  offset = AssignSwitchTablesOffset(offset);
 
-  offset = AssignFillArrayDataOffset(cu, offset);
+  offset = AssignFillArrayDataOffset(offset);
 
-  cu->total_size = offset;
+  total_size_ = offset;
 }
 
 /*
@@ -939,10 +919,9 @@
  * before sending them off to the assembler. If an out-of-range branch distance is
  * seen, rearrange the instructions a bit to correct it.
  */
-void AssembleLIR(CompilationUnit* cu)
+void Mir2Lir::AssembleLIR()
 {
-  Codegen* cg = cu->cg.get();
-  AssignOffsets(cu);
+  AssignOffsets();
   int assembler_retries = 0;
   /*
    * Assemble here.  Note that we generate code with optimistic assumptions
@@ -950,34 +929,34 @@
    */
 
   while (true) {
-    AssemblerStatus res = cg->AssembleInstructions(cu, 0);
+    AssemblerStatus res = AssembleInstructions(0);
     if (res == kSuccess) {
       break;
     } else {
       assembler_retries++;
       if (assembler_retries > MAX_ASSEMBLER_RETRIES) {
-        CodegenDump(cu);
+        CodegenDump();
         LOG(FATAL) << "Assembler error - too many retries";
       }
       // Redo offsets and try again
-      AssignOffsets(cu);
-      cu->code_buffer.clear();
+      AssignOffsets();
+      code_buffer_.clear();
     }
   }
 
   // Install literals
-  InstallLiteralPools(cu);
+  InstallLiteralPools();
 
   // Install switch tables
-  InstallSwitchTables(cu);
+  InstallSwitchTables();
 
   // Install fill array data
-  InstallFillArrayData(cu);
+  InstallFillArrayData();
 
   // Create the mapping table and native offset to reference map.
-  CreateMappingTables(cu);
+  CreateMappingTables();
 
-  CreateNativeGcMap(cu);
+  CreateNativeGcMap();
 }
 
 /*
@@ -987,14 +966,14 @@
  * all resource flags on this to prevent code motion across
  * target boundaries.  KeyVal is just there for debugging.
  */
-static LIR* InsertCaseLabel(CompilationUnit* cu, int vaddr, int keyVal)
+LIR* Mir2Lir::InsertCaseLabel(int vaddr, int keyVal)
 {
   SafeMap<unsigned int, LIR*>::iterator it;
-  it = cu->boundary_map.find(vaddr);
-  if (it == cu->boundary_map.end()) {
+  it = boundary_map_.find(vaddr);
+  if (it == boundary_map_.end()) {
     LOG(FATAL) << "Error: didn't find vaddr 0x" << std::hex << vaddr;
   }
-  LIR* new_label = static_cast<LIR*>(NewMem(cu, sizeof(LIR), true, kAllocLIR));
+  LIR* new_label = static_cast<LIR*>(NewMem(cu_, sizeof(LIR), true, kAllocLIR));
   new_label->dalvik_offset = vaddr;
   new_label->opcode = kPseudoCaseLabel;
   new_label->operands[0] = keyVal;
@@ -1002,7 +981,7 @@
   return new_label;
 }
 
-static void MarkPackedCaseLabels(CompilationUnit* cu, Codegen::SwitchTable *tab_rec)
+void Mir2Lir::MarkPackedCaseLabels(Mir2Lir::SwitchTable *tab_rec)
 {
   const uint16_t* table = tab_rec->table;
   int base_vaddr = tab_rec->vaddr;
@@ -1010,11 +989,11 @@
   int entries = table[1];
   int low_key = s4FromSwitchData(&table[2]);
   for (int i = 0; i < entries; i++) {
-    tab_rec->targets[i] = InsertCaseLabel(cu, base_vaddr + targets[i], i + low_key);
+    tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], i + low_key);
   }
 }
 
-static void MarkSparseCaseLabels(CompilationUnit* cu, Codegen::SwitchTable *tab_rec)
+void Mir2Lir::MarkSparseCaseLabels(Mir2Lir::SwitchTable *tab_rec)
 {
   const uint16_t* table = tab_rec->table;
   int base_vaddr = tab_rec->vaddr;
@@ -1022,29 +1001,29 @@
   const int* keys = reinterpret_cast<const int*>(&table[2]);
   const int* targets = &keys[entries];
   for (int i = 0; i < entries; i++) {
-    tab_rec->targets[i] = InsertCaseLabel(cu, base_vaddr + targets[i], keys[i]);
+    tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], keys[i]);
   }
 }
 
-void ProcessSwitchTables(CompilationUnit* cu)
+void Mir2Lir::ProcessSwitchTables()
 {
   GrowableListIterator iterator;
-  GrowableListIteratorInit(&cu->switch_tables, &iterator);
+  GrowableListIteratorInit(&switch_tables_, &iterator);
   while (true) {
-    Codegen::SwitchTable *tab_rec =
-        reinterpret_cast<Codegen::SwitchTable*>(GrowableListIteratorNext(&iterator));
+    Mir2Lir::SwitchTable *tab_rec =
+        reinterpret_cast<Mir2Lir::SwitchTable*>(GrowableListIteratorNext(&iterator));
     if (tab_rec == NULL) break;
     if (tab_rec->table[0] == Instruction::kPackedSwitchSignature) {
-      MarkPackedCaseLabels(cu, tab_rec);
+      MarkPackedCaseLabels(tab_rec);
     } else if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
-      MarkSparseCaseLabels(cu, tab_rec);
+      MarkSparseCaseLabels(tab_rec);
     } else {
       LOG(FATAL) << "Invalid switch table";
     }
   }
 }
 
-void DumpSparseSwitchTable(const uint16_t* table)
+void Mir2Lir::DumpSparseSwitchTable(const uint16_t* table)
   /*
    * Sparse switch data format:
    *  ushort ident = 0x0200   magic value
@@ -1066,7 +1045,7 @@
   }
 }
 
-void DumpPackedSwitchTable(const uint16_t* table)
+void Mir2Lir::DumpPackedSwitchTable(const uint16_t* table)
   /*
    * Packed switch data format:
    *  ushort ident = 0x0100   magic value
@@ -1095,16 +1074,16 @@
  * which we split a single Dalvik instruction, only the first MIR op
  * associated with a Dalvik PC should be entered into the map.
  */
-LIR* MarkBoundary(CompilationUnit* cu, int offset, const char* inst_str)
+LIR* Mir2Lir::MarkBoundary(int offset, const char* inst_str)
 {
-  LIR* res = NewLIR1(cu, kPseudoDalvikByteCodeBoundary, reinterpret_cast<uintptr_t>(inst_str));
-  if (cu->boundary_map.find(offset) == cu->boundary_map.end()) {
-    cu->boundary_map.Put(offset, res);
+  LIR* res = NewLIR1(kPseudoDalvikByteCodeBoundary, reinterpret_cast<uintptr_t>(inst_str));
+  if (boundary_map_.find(offset) == boundary_map_.end()) {
+    boundary_map_.Put(offset, res);
   }
   return res;
 }
 
-bool EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2)
+bool Mir2Lir::EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2)
 {
   bool is_taken;
   switch (opcode) {
@@ -1128,7 +1107,7 @@
 }
 
 // Convert relation of src1/src2 to src2/src1
-ConditionCode FlipComparisonOrder(ConditionCode before) {
+ConditionCode Mir2Lir::FlipComparisonOrder(ConditionCode before) {
   ConditionCode res;
   switch (before) {
     case kCondEq: res = kCondEq; break;
@@ -1144,4 +1123,161 @@
   return res;
 }
 
+Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph)
+    : literal_list_(NULL),
+      method_literal_list_(NULL),
+      code_literal_list_(NULL),
+      cu_(cu),
+      mir_graph_(mir_graph),
+      data_offset_(0),
+      total_size_(0),
+      block_label_list_(NULL),
+      live_sreg_(0),
+      num_core_spills_(0),
+      num_fp_spills_(0),
+      frame_size_(0),
+      core_spill_mask_(0),
+      fp_spill_mask_(0),
+      first_lir_insn_(NULL),
+      last_lir_insn_(NULL)
+{
+  CompilerInitGrowableList(cu_, &switch_tables_, 4, kListSwitchTables);
+  CompilerInitGrowableList(cu_, &fill_array_data_, 4, kListFillArrayData);
+  CompilerInitGrowableList(cu_, &throw_launchpads_, 2048, kListThrowLaunchPads);
+  CompilerInitGrowableList(cu_, &intrinsic_launchpads_, 4, kListMisc);
+  CompilerInitGrowableList(cu_, &suspend_launchpads_, 2048, kListSuspendLaunchPads);
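+  // One promotion map entry per Dalvik v-reg and compiler temp, plus one for the Method* s-reg.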
+  promotion_map_ = static_cast<PromotionMap*>
+      (NewMem(cu_, (cu_->num_dalvik_registers  + cu_->num_compiler_temps + 1) *
+              sizeof(promotion_map_[0]), true, kAllocRegAlloc));
+}
+
+void Mir2Lir::Materialize() {
+  CompilerInitializeRegAlloc();  // Needs to happen after SSA naming
+
+  /* Allocate Registers using simple local allocation scheme */
+  SimpleRegAlloc();
+
+  // FIXME: re-enable by retrieving from mir_graph.
+  SpecialCaseHandler special_case = kNoHandler;
+
+  if (special_case != kNoHandler) {
+    /*
+     * Custom codegen for special cases.  If for any reason the
+     * special codegen doesn't succeed, first_lir_insn_ will be
+     * set to NULL.
+     */
+    SpecialMIR2LIR(special_case);
+  }
+
+  /* Convert MIR to LIR, etc. */
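+  // Runs only if the special-case path above produced no LIR.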
+  if (first_lir_insn_ == NULL) {
+    MethodMIR2LIR();
+  }
+
+  /* Method is not empty */
+  if (first_lir_insn_) {
+    // Mark the targets of switch statement case labels.
+    ProcessSwitchTables();
+
+    /* Convert LIR into machine code. */
+    AssembleLIR();
+
+    if (cu_->verbose) {
+      CodegenDump();
+    }
+  }
+}
+
+CompiledMethod* Mir2Lir::GetCompiledMethod() {
+  // Combine vmap tables - core regs, then fp regs - into vmap_table
+  std::vector<uint16_t> vmap_table;
+  // Core regs may have been inserted out of order - sort first
+  std::sort(core_vmap_table_.begin(), core_vmap_table_.end());
+  for (size_t i = 0 ; i < core_vmap_table_.size(); i++) {
+    // Copy, stripping out the phys register sort key
+    vmap_table.push_back(~(-1 << VREG_NUM_WIDTH) & core_vmap_table_[i]);
+  }
+  // If we have a frame, push a marker to take the place of lr.
+  if (frame_size_ > 0) {
+    vmap_table.push_back(INVALID_VREG);
+  } else {
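+    // A frameless method must not have spilled any core or fp registers.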
+    DCHECK_EQ(__builtin_popcount(core_spill_mask_), 0);
+    DCHECK_EQ(__builtin_popcount(fp_spill_mask_), 0);
+  }
+  // Combine vmap tables - core regs, then fp regs. fp regs already sorted
+  for (uint32_t i = 0; i < fp_vmap_table_.size(); i++) {
+    vmap_table.push_back(fp_vmap_table_[i]);
+  }
+  CompiledMethod* result =
+      new CompiledMethod(cu_->instruction_set, code_buffer_,
+                         frame_size_, core_spill_mask_, fp_spill_mask_,
+                         combined_mapping_table_, vmap_table, native_gc_map_);
+  return result;
+}
+
+int Mir2Lir::ComputeFrameSize() {
+  /* Figure out the frame size */
+  static const uint32_t kAlignMask = kStackAlignment - 1;
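+  // The mask arithmetic below assumes kStackAlignment is a power of two.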
+  uint32_t size = (num_core_spills_ + num_fp_spills_ +
+                   1 /* filler word */ + cu_->num_regs + cu_->num_outs +
+                   cu_->num_compiler_temps + 1 /* cur_method* */)
+                   * sizeof(uint32_t);
+  /* Align and set */
+  return (size + kAlignMask) & ~(kAlignMask);
+}
+
+/*
+ * Append an LIR instruction to the LIR list maintained by a compilation
+ * unit
+ */
+void Mir2Lir::AppendLIR(LIR* lir)
+{
+  if (first_lir_insn_ == NULL) {
+    DCHECK(last_lir_insn_ == NULL);
+    last_lir_insn_ = first_lir_insn_ = lir;
+    lir->prev = lir->next = NULL;
+  } else {
+    last_lir_insn_->next = lir;
+    lir->prev = last_lir_insn_;
+    lir->next = NULL;
+    last_lir_insn_ = lir;
+  }
+}
+
+/*
+ * Insert an LIR instruction before the current instruction, which cannot be the
+ * first instruction.
+ *
+ * prev_lir <-> new_lir <-> current_lir
+ */
+void Mir2Lir::InsertLIRBefore(LIR* current_lir, LIR* new_lir)
+{
+  DCHECK(current_lir->prev != NULL);
+  LIR *prev_lir = current_lir->prev;
+
+  prev_lir->next = new_lir;
+  new_lir->prev = prev_lir;
+  new_lir->next = current_lir;
+  current_lir->prev = new_lir;
+}
+
+/*
+ * Insert an LIR instruction after the current instruction, which cannot be the
+ * last instruction.
+ *
+ * current_lir -> new_lir -> old_next
+ */
+void Mir2Lir::InsertLIRAfter(LIR* current_lir, LIR* new_lir)
+{
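+  // current_lir->next must be non-NULL; its prev pointer is fixed up after the splice.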
+  new_lir->prev = current_lir;
+  new_lir->next = current_lir->next;
+  current_lir->next = new_lir;
+  new_lir->next->prev = new_lir;
+}
+
+
 } // namespace art
diff --git a/src/compiler/dex/quick/codegen_util.h b/src/compiler/dex/quick/codegen_util.h
deleted file mode 100644
index 7fd26f3..0000000
--- a/src/compiler/dex/quick/codegen_util.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_SRC_COMPILER_DEX_QUICK_CODEGENUTIL_H_
-#define ART_SRC_COMPILER_DEX_QUICK_CODEGENUTIL_H_
-
-#include <stdint.h>
-
-#include "compiler/dex/compiler_enums.h"
-#include "compiler/dex/compiler_ir.h"
-
-namespace art {
-
-class CompilationUnit;
-struct LIR;
-
-void MarkSafepointPC(CompilationUnit* cu, LIR* inst);
-bool FastInstance(CompilationUnit* cu,  uint32_t field_idx,
-                  int& field_offset, bool& is_volatile, bool is_put);
-void SetupResourceMasks(CompilationUnit* cu, LIR* lir);
-inline int32_t s4FromSwitchData(const void* switch_data) { return *reinterpret_cast<const int32_t*>(switch_data); }
-inline RegisterClass oat_reg_class_by_size(OpSize size) { return (size == kUnsignedHalf || size == kSignedHalf || size == kUnsignedByte || size == kSignedByte ) ? kCoreReg : kAnyReg; }
-void AssembleLIR(CompilationUnit* cu);
-void SetMemRefType(CompilationUnit* cu, LIR* lir, bool is_load, int mem_type);
-void AnnotateDalvikRegAccess(CompilationUnit* cu, LIR* lir, int reg_id, bool is_load, bool is64bit);
-uint64_t GetRegMaskCommon(CompilationUnit* cu, int reg);
-void SetupRegMask(CompilationUnit* cu, uint64_t* mask, int reg);
-void SetupResourceMasks(CompilationUnit* cu, LIR* lir);
-void DumpLIRInsn(CompilationUnit* cu, LIR* arg, unsigned char* base_addr);
-void DumpPromotionMap(CompilationUnit *cu);
-void CodegenDump(CompilationUnit* cu);
-LIR* RawLIR(CompilationUnit* cu, int dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
-            int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = NULL);
-LIR* NewLIR0(CompilationUnit* cu, int opcode);
-LIR* NewLIR1(CompilationUnit* cu, int opcode, int dest);
-LIR* NewLIR2(CompilationUnit* cu, int opcode, int dest, int src1);
-LIR* NewLIR3(CompilationUnit* cu, int opcode, int dest, int src1, int src2);
-LIR* NewLIR4(CompilationUnit* cu, int opcode, int dest, int src1, int src2, int info);
-LIR* NewLIR5(CompilationUnit* cu, int opcode, int dest, int src1, int src2, int info1, int info2);
-LIR* ScanLiteralPool(LIR* data_target, int value, unsigned int delta);
-LIR* ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi);
-LIR* AddWordData(CompilationUnit* cu, LIR* *constant_list_p, int value);
-LIR* AddWideData(CompilationUnit* cu, LIR* *constant_list_p, int val_lo, int val_hi);
-void ProcessSwitchTables(CompilationUnit* cu);
-void DumpSparseSwitchTable(const uint16_t* table);
-void DumpPackedSwitchTable(const uint16_t* table);
-LIR* MarkBoundary(CompilationUnit* cu, int offset, const char* inst_str);
-void NopLIR(LIR* lir);
-bool EvaluateBranch(Instruction::Code opcode, int src1, int src2);
-bool IsInexpensiveConstant(CompilationUnit* cu, RegLocation rl_src);
-ConditionCode FlipComparisonOrder(ConditionCode before);
-
-}  // namespace art
-
-#endif // ART_SRC_COMPILER_DEX_QUICK_CODEGENUTIL_H_
diff --git a/src/compiler/dex/quick/gen_common.cc b/src/compiler/dex/quick/gen_common.cc
index 652a448..c13e797 100644
--- a/src/compiler/dex/quick/gen_common.cc
+++ b/src/compiler/dex/quick/gen_common.cc
@@ -14,11 +14,9 @@
  * limitations under the License.
  */
 
-#include "compiler/dex/quick/codegen_util.h"
 #include "compiler/dex/compiler_ir.h"
 #include "compiler/dex/compiler_internals.h"
 #include "oat/runtime/oat_support_entrypoints.h"
-#include "ralloc_util.h"
 
 namespace art {
 
@@ -32,66 +30,62 @@
  * Generate an kPseudoBarrier marker to indicate the boundary of special
  * blocks.
  */
-void Codegen::GenBarrier(CompilationUnit* cu)
+void Mir2Lir::GenBarrier()
 {
-  LIR* barrier = NewLIR0(cu, kPseudoBarrier);
+  LIR* barrier = NewLIR0(kPseudoBarrier);
   /* Mark all resources as being clobbered */
   barrier->def_mask = -1;
 }
 
 // FIXME: need to do some work to split out targets with
 // condition codes and those without
-LIR* Codegen::GenCheck(CompilationUnit* cu, ConditionCode c_code, ThrowKind kind)
+LIR* Mir2Lir::GenCheck(ConditionCode c_code, ThrowKind kind)
 {
-  DCHECK_NE(cu->instruction_set, kMips);
-  LIR* tgt = RawLIR(cu, 0, kPseudoThrowTarget, kind,
-                    cu->current_dalvik_offset);
-  LIR* branch = OpCondBranch(cu, c_code, tgt);
+  DCHECK_NE(cu_->instruction_set, kMips);
+  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_);
+  LIR* branch = OpCondBranch(c_code, tgt);
   // Remember branch target - will process later
-  InsertGrowableList(cu, &cu->throw_launchpads, reinterpret_cast<uintptr_t>(tgt));
+  InsertGrowableList(cu_, &throw_launchpads_, reinterpret_cast<uintptr_t>(tgt));
   return branch;
 }
 
-LIR* Codegen::GenImmedCheck(CompilationUnit* cu, ConditionCode c_code, int reg, int imm_val,
-                            ThrowKind kind)
+LIR* Mir2Lir::GenImmedCheck(ConditionCode c_code, int reg, int imm_val, ThrowKind kind)
 {
-  LIR* tgt = RawLIR(cu, 0, kPseudoThrowTarget, kind,
-                    cu->current_dalvik_offset, reg, imm_val);
+  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg, imm_val);
   LIR* branch;
   if (c_code == kCondAl) {
-    branch = OpUnconditionalBranch(cu, tgt);
+    branch = OpUnconditionalBranch(tgt);
   } else {
-    branch = OpCmpImmBranch(cu, c_code, reg, imm_val, tgt);
+    branch = OpCmpImmBranch(c_code, reg, imm_val, tgt);
   }
   // Remember branch target - will process later
-  InsertGrowableList(cu, &cu->throw_launchpads, reinterpret_cast<uintptr_t>(tgt));
+  InsertGrowableList(cu_, &throw_launchpads_, reinterpret_cast<uintptr_t>(tgt));
   return branch;
 }
 
 /* Perform null-check on a register.  */
-LIR* Codegen::GenNullCheck(CompilationUnit* cu, int s_reg, int m_reg, int opt_flags)
+LIR* Mir2Lir::GenNullCheck(int s_reg, int m_reg, int opt_flags)
 {
-  if (!(cu->disable_opt & (1 << kNullCheckElimination)) &&
+  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) &&
     opt_flags & MIR_IGNORE_NULL_CHECK) {
     return NULL;
   }
-  return GenImmedCheck(cu, kCondEq, m_reg, 0, kThrowNullPointer);
+  return GenImmedCheck(kCondEq, m_reg, 0, kThrowNullPointer);
 }
 
 /* Perform check on two registers */
-LIR* Codegen::GenRegRegCheck(CompilationUnit* cu, ConditionCode c_code, int reg1, int reg2,
+LIR* Mir2Lir::GenRegRegCheck(ConditionCode c_code, int reg1, int reg2,
                              ThrowKind kind)
 {
-  LIR* tgt = RawLIR(cu, 0, kPseudoThrowTarget, kind,
-                    cu->current_dalvik_offset, reg1, reg2);
-  LIR* branch = OpCmpBranch(cu, c_code, reg1, reg2, tgt);
+  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg1, reg2);
+  LIR* branch = OpCmpBranch(c_code, reg1, reg2, tgt);
   // Remember branch target - will process later
-  InsertGrowableList(cu, &cu->throw_launchpads, reinterpret_cast<uintptr_t>(tgt));
+  InsertGrowableList(cu_, &throw_launchpads_, reinterpret_cast<uintptr_t>(tgt));
   return branch;
 }
 
-void Codegen::GenCompareAndBranch(CompilationUnit* cu, Instruction::Code opcode,
-                                  RegLocation rl_src1, RegLocation rl_src2, LIR* taken,
+void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
+                                  RegLocation rl_src2, LIR* taken,
                                   LIR* fall_through)
 {
   ConditionCode cond;
@@ -127,29 +121,29 @@
     cond = FlipComparisonOrder(cond);
   }
 
-  rl_src1 = LoadValue(cu, rl_src1, kCoreReg);
+  rl_src1 = LoadValue(rl_src1, kCoreReg);
   // Is this really an immediate comparison?
   if (rl_src2.is_const) {
     // If it's already live in a register or not easily materialized, just keep going
-    RegLocation rl_temp = UpdateLoc(cu, rl_src2);
+    RegLocation rl_temp = UpdateLoc(rl_src2);
     if ((rl_temp.location == kLocDalvikFrame) &&
-        InexpensiveConstantInt(cu->mir_graph->ConstantValue(rl_src2))) {
+        InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src2))) {
       // OK - convert this to a compare immediate and branch
-      OpCmpImmBranch(cu, cond, rl_src1.low_reg, cu->mir_graph->ConstantValue(rl_src2), taken);
-      OpUnconditionalBranch(cu, fall_through);
+      OpCmpImmBranch(cond, rl_src1.low_reg, mir_graph_->ConstantValue(rl_src2), taken);
+      OpUnconditionalBranch(fall_through);
       return;
     }
   }
-  rl_src2 = LoadValue(cu, rl_src2, kCoreReg);
-  OpCmpBranch(cu, cond, rl_src1.low_reg, rl_src2.low_reg, taken);
-  OpUnconditionalBranch(cu, fall_through);
+  rl_src2 = LoadValue(rl_src2, kCoreReg);
+  OpCmpBranch(cond, rl_src1.low_reg, rl_src2.low_reg, taken);
+  OpUnconditionalBranch(fall_through);
 }
 
-void Codegen::GenCompareZeroAndBranch(CompilationUnit* cu, Instruction::Code opcode,
-                                      RegLocation rl_src, LIR* taken, LIR* fall_through)
+void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
+                                      LIR* fall_through)
 {
   ConditionCode cond;
-  rl_src = LoadValue(cu, rl_src, kCoreReg);
+  rl_src = LoadValue(rl_src, kCoreReg);
   switch (opcode) {
     case Instruction::IF_EQZ:
       cond = kCondEq;
@@ -173,27 +167,27 @@
       cond = static_cast<ConditionCode>(0);
       LOG(FATAL) << "Unexpected opcode " << opcode;
   }
-  OpCmpImmBranch(cu, cond, rl_src.low_reg, 0, taken);
-  OpUnconditionalBranch(cu, fall_through);
+  OpCmpImmBranch(cond, rl_src.low_reg, 0, taken);
+  OpUnconditionalBranch(fall_through);
 }
 
-void Codegen::GenIntToLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src)
 {
-  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   if (rl_src.location == kLocPhysReg) {
-    OpRegCopy(cu, rl_result.low_reg, rl_src.low_reg);
+    OpRegCopy(rl_result.low_reg, rl_src.low_reg);
   } else {
-    LoadValueDirect(cu, rl_src, rl_result.low_reg);
+    LoadValueDirect(rl_src, rl_result.low_reg);
   }
-  OpRegRegImm(cu, kOpAsr, rl_result.high_reg, rl_result.low_reg, 31);
-  StoreValueWide(cu, rl_dest, rl_result);
+  OpRegRegImm(kOpAsr, rl_result.high_reg, rl_result.low_reg, 31);
+  StoreValueWide(rl_dest, rl_result);
 }
 
-void Codegen::GenIntNarrowing(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
                               RegLocation rl_src)
 {
-   rl_src = LoadValue(cu, rl_src, kCoreReg);
-   RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+   rl_src = LoadValue(rl_src, kCoreReg);
+   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    OpKind op = kOpInvalid;
    switch (opcode) {
      case Instruction::INT_TO_BYTE:
@@ -208,8 +202,8 @@
      default:
        LOG(ERROR) << "Bad int conversion type";
    }
-   OpRegReg(cu, op, rl_result.low_reg, rl_src.low_reg);
-   StoreValue(cu, rl_dest, rl_result);
+   OpRegReg(op, rl_result.low_reg, rl_src.low_reg);
+   StoreValue(rl_dest, rl_result);
 }
 
 /*
@@ -217,21 +211,20 @@
  * Array::AllocFromCode(type_idx, method, count);
  * Note: AllocFromCode will handle checks for errNegativeArraySize.
  */
-void Codegen::GenNewArray(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest,
+void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
                           RegLocation rl_src)
 {
-  FlushAllRegs(cu);  /* Everything to home location */
+  FlushAllRegs();  /* Everything to home location */
   int func_offset;
-  if (cu->compiler_driver->CanAccessTypeWithoutChecks(cu->method_idx,
-                                                      *cu->dex_file,
-                                                      type_idx)) {
+  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
+                                                       type_idx)) {
     func_offset = ENTRYPOINT_OFFSET(pAllocArrayFromCode);
   } else {
     func_offset = ENTRYPOINT_OFFSET(pAllocArrayFromCodeWithAccessCheck);
   }
-  CallRuntimeHelperImmMethodRegLocation(cu, func_offset, type_idx, rl_src, true);
-  RegLocation rl_result = GetReturn(cu, false);
-  StoreValue(cu, rl_dest, rl_result);
+  CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
+  RegLocation rl_result = GetReturn(false);
+  StoreValue(rl_dest, rl_result);
 }
 
 /*
@@ -240,22 +233,21 @@
  * code throws runtime exception "bad Filled array req" for 'D' and 'J'.
  * Current code also throws internal unimp if not 'L', '[' or 'I'.
  */
-void Codegen::GenFilledNewArray(CompilationUnit* cu, CallInfo* info)
+void Mir2Lir::GenFilledNewArray(CallInfo* info)
 {
   int elems = info->num_arg_words;
   int type_idx = info->index;
-  FlushAllRegs(cu);  /* Everything to home location */
+  FlushAllRegs();  /* Everything to home location */
   int func_offset;
-  if (cu->compiler_driver->CanAccessTypeWithoutChecks(cu->method_idx,
-                                                      *cu->dex_file,
-                                                      type_idx)) {
+  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
+                                                       type_idx)) {
     func_offset = ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCode);
   } else {
     func_offset = ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCodeWithAccessCheck);
   }
-  CallRuntimeHelperImmMethodImm(cu, func_offset, type_idx, elems, true);
-  FreeTemp(cu, TargetReg(kArg2));
-  FreeTemp(cu, TargetReg(kArg1));
+  CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true);
+  FreeTemp(TargetReg(kArg2));
+  FreeTemp(TargetReg(kArg1));
   /*
    * NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the
    * return region.  Because AllocFromCode placed the new array
@@ -263,7 +255,7 @@
    * added, it may be necessary to additionally copy all return
    * values to a home location in thread-local storage
    */
-  LockTemp(cu, TargetReg(kRet0));
+  LockTemp(TargetReg(kRet0));
 
   // TODO: use the correct component size, currently all supported types
   // share array alignment with ints (see comment at head of function)
@@ -280,9 +272,9 @@
      * home location.
      */
     for (int i = 0; i < elems; i++) {
-      RegLocation loc = UpdateLoc(cu, info->args[i]);
+      RegLocation loc = UpdateLoc(info->args[i]);
       if (loc.location == kLocPhysReg) {
-        StoreBaseDisp(cu, TargetReg(kSp), SRegOffset(cu, loc.s_reg_low),
-                      loc.low_reg, kWord);
+        StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.low_reg, kWord);
       }
     }
@@ -291,83 +283,82 @@
      * this is an uncommon operation and isn't especially performance
      * critical.
      */
-    int r_src = AllocTemp(cu);
-    int r_dst = AllocTemp(cu);
-    int r_idx = AllocTemp(cu);
+    int r_src = AllocTemp();
+    int r_dst = AllocTemp();
+    int r_idx = AllocTemp();
     int r_val = INVALID_REG;
-    switch(cu->instruction_set) {
+    switch (cu_->instruction_set) {
       case kThumb2:
         r_val = TargetReg(kLr);
         break;
       case kX86:
-        FreeTemp(cu, TargetReg(kRet0));
-        r_val = AllocTemp(cu);
+        FreeTemp(TargetReg(kRet0));
+        r_val = AllocTemp();
         break;
       case kMips:
-        r_val = AllocTemp(cu);
+        r_val = AllocTemp();
         break;
-      default: LOG(FATAL) << "Unexpected instruction set: " << cu->instruction_set;
+      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
     }
     // Set up source pointer
     RegLocation rl_first = info->args[0];
-    OpRegRegImm(cu, kOpAdd, r_src, TargetReg(kSp),
-                SRegOffset(cu, rl_first.s_reg_low));
+    OpRegRegImm(kOpAdd, r_src, TargetReg(kSp), SRegOffset(rl_first.s_reg_low));
     // Set up the target pointer
-    OpRegRegImm(cu, kOpAdd, r_dst, TargetReg(kRet0),
+    OpRegRegImm(kOpAdd, r_dst, TargetReg(kRet0),
                 mirror::Array::DataOffset(component_size).Int32Value());
     // Set up the loop counter (known to be > 0)
-    LoadConstant(cu, r_idx, elems - 1);
+    LoadConstant(r_idx, elems - 1);
     // Generate the copy loop.  Going backwards for convenience
-    LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+    LIR* target = NewLIR0(kPseudoTargetLabel);
     // Copy next element
-    LoadBaseIndexed(cu, r_src, r_idx, r_val, 2, kWord);
-    StoreBaseIndexed(cu, r_dst, r_idx, r_val, 2, kWord);
-    FreeTemp(cu, r_val);
-    OpDecAndBranch(cu, kCondGe, r_idx, target);
-    if (cu->instruction_set == kX86) {
+    LoadBaseIndexed(r_src, r_idx, r_val, 2, kWord);
+    StoreBaseIndexed(r_dst, r_idx, r_val, 2, kWord);
+    FreeTemp(r_val);
+    OpDecAndBranch(kCondGe, r_idx, target);
+    if (cu_->instruction_set == kX86) {
       // Restore the target pointer
-      OpRegRegImm(cu, kOpAdd, TargetReg(kRet0), r_dst,
+      OpRegRegImm(kOpAdd, TargetReg(kRet0), r_dst,
                   -mirror::Array::DataOffset(component_size).Int32Value());
     }
   } else if (!info->is_range) {
     // TUNING: interleave
     for (int i = 0; i < elems; i++) {
-      RegLocation rl_arg = LoadValue(cu, info->args[i], kCoreReg);
-      StoreBaseDisp(cu, TargetReg(kRet0),
+      RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
+      StoreBaseDisp(TargetReg(kRet0),
                     mirror::Array::DataOffset(component_size).Int32Value() +
                     i * 4, rl_arg.low_reg, kWord);
       // If the LoadValue caused a temp to be allocated, free it
-      if (IsTemp(cu, rl_arg.low_reg)) {
-        FreeTemp(cu, rl_arg.low_reg);
+      if (IsTemp(rl_arg.low_reg)) {
+        FreeTemp(rl_arg.low_reg);
       }
     }
   }
   if (info->result.location != kLocInvalid) {
-    StoreValue(cu, info->result, GetReturn(cu, false /* not fp */));
+    StoreValue(info->result, GetReturn(false /* not fp */));
   }
 }
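
What the backwards copy loop above computes, restated in plain C++ as a sketch of the semantics rather than the emitted code: in the /range case the arguments already sit contiguously in their stack home locations, so each iteration moves one word from the frame into the array payload while r_idx counts down from elems - 1 to 0.

#include <cstdint>

// sp_args: the contiguous stack home locations; array_data: the payload
// starting at the array's data offset. Both names are invented here.
void CopyRangeArgs(const int32_t* sp_args, int32_t* array_data, int elems) {
  for (int idx = elems - 1; idx >= 0; --idx) {
    array_data[idx] = sp_args[idx];  // the LoadBaseIndexed/StoreBaseIndexed pair
  }
}
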
 
-void Codegen::GenSput(CompilationUnit* cu, uint32_t field_idx, RegLocation rl_src,
-                      bool is_long_or_double, bool is_object)
+void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_double,
+                      bool is_object)
 {
   int field_offset;
   int ssb_index;
   bool is_volatile;
   bool is_referrers_class;
-  bool fast_path = cu->compiler_driver->ComputeStaticFieldInfo(
-      field_idx, cu->mir_graph->GetCurrentDexCompilationUnit(), field_offset, ssb_index,
+  bool fast_path = cu_->compiler_driver->ComputeStaticFieldInfo(
+      field_idx, mir_graph_->GetCurrentDexCompilationUnit(), field_offset, ssb_index,
       is_referrers_class, is_volatile, true);
   if (fast_path && !SLOW_FIELD_PATH) {
     DCHECK_GE(field_offset, 0);
     int rBase;
     if (is_referrers_class) {
       // Fast path, static storage base is this method's class
-      RegLocation rl_method  = LoadCurrMethod(cu);
-      rBase = AllocTemp(cu);
-      LoadWordDisp(cu, rl_method.low_reg,
+      RegLocation rl_method = LoadCurrMethod();
+      rBase = AllocTemp();
+      LoadWordDisp(rl_method.low_reg,
                    mirror::AbstractMethod::DeclaringClassOffset().Int32Value(), rBase);
-      if (IsTemp(cu, rl_method.low_reg)) {
-        FreeTemp(cu, rl_method.low_reg);
+      if (IsTemp(rl_method.low_reg)) {
+        FreeTemp(rl_method.low_reg);
       }
     } else {
       // Medium path, static storage base in a different class which requires checks that the other
@@ -375,82 +366,82 @@
       // TODO: remove initialized check now that we are initializing classes in the compiler driver.
       DCHECK_GE(ssb_index, 0);
       // May do runtime call so everything to home locations.
-      FlushAllRegs(cu);
+      FlushAllRegs();
       // Using fixed register to sync with possible call to runtime support.
       int r_method = TargetReg(kArg1);
-      LockTemp(cu, r_method);
-      LoadCurrMethodDirect(cu, r_method);
+      LockTemp(r_method);
+      LoadCurrMethodDirect(r_method);
       rBase = TargetReg(kArg0);
-      LockTemp(cu, rBase);
-      LoadWordDisp(cu, r_method,
+      LockTemp(rBase);
+      LoadWordDisp(r_method,
                    mirror::AbstractMethod::DexCacheInitializedStaticStorageOffset().Int32Value(),
                    rBase);
-      LoadWordDisp(cu, rBase,
+      LoadWordDisp(rBase,
                    mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
                    sizeof(int32_t*) * ssb_index, rBase);
       // rBase now points at appropriate static storage base (Class*)
       // or NULL if not initialized. Check for NULL and call helper if NULL.
       // TUNING: fast path should fall through
-      LIR* branch_over = OpCmpImmBranch(cu, kCondNe, rBase, 0, NULL);
-      LoadConstant(cu, TargetReg(kArg0), ssb_index);
-      CallRuntimeHelperImm(cu, ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
-      if (cu->instruction_set == kMips) {
+      LIR* branch_over = OpCmpImmBranch(kCondNe, rBase, 0, NULL);
+      LoadConstant(TargetReg(kArg0), ssb_index);
+      CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
+      if (cu_->instruction_set == kMips) {
        // For Arm, kRet0 = kArg0 = rBase; for Mips, we need to copy.
-        OpRegCopy(cu, rBase, TargetReg(kRet0));
+        OpRegCopy(rBase, TargetReg(kRet0));
       }
-      LIR* skip_target = NewLIR0(cu, kPseudoTargetLabel);
+      LIR* skip_target = NewLIR0(kPseudoTargetLabel);
       branch_over->target = skip_target;
-      FreeTemp(cu, r_method);
+      FreeTemp(r_method);
     }
     // rBase now holds static storage base
     if (is_long_or_double) {
-      rl_src = LoadValueWide(cu, rl_src, kAnyReg);
+      rl_src = LoadValueWide(rl_src, kAnyReg);
     } else {
-      rl_src = LoadValue(cu, rl_src, kAnyReg);
+      rl_src = LoadValue(rl_src, kAnyReg);
     }
     if (is_volatile) {
-      GenMemBarrier(cu, kStoreStore);
+      GenMemBarrier(kStoreStore);
     }
     if (is_long_or_double) {
-      StoreBaseDispWide(cu, rBase, field_offset, rl_src.low_reg,
+      StoreBaseDispWide(rBase, field_offset, rl_src.low_reg,
                         rl_src.high_reg);
     } else {
-      StoreWordDisp(cu, rBase, field_offset, rl_src.low_reg);
+      StoreWordDisp(rBase, field_offset, rl_src.low_reg);
     }
     if (is_volatile) {
-      GenMemBarrier(cu, kStoreLoad);
+      GenMemBarrier(kStoreLoad);
     }
-    if (is_object && !cu->mir_graph->IsConstantNullRef(rl_src)) {
-      MarkGCCard(cu, rl_src.low_reg, rBase);
+    if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
+      MarkGCCard(rl_src.low_reg, rBase);
     }
-    FreeTemp(cu, rBase);
+    FreeTemp(rBase);
   } else {
-    FlushAllRegs(cu);  // Everything to home locations
+    FlushAllRegs();  // Everything to home locations
     int setter_offset = is_long_or_double ? ENTRYPOINT_OFFSET(pSet64Static) :
         (is_object ? ENTRYPOINT_OFFSET(pSetObjStatic)
         : ENTRYPOINT_OFFSET(pSet32Static));
-    CallRuntimeHelperImmRegLocation(cu, setter_offset, field_idx, rl_src, true);
+    CallRuntimeHelperImmRegLocation(setter_offset, field_idx, rl_src, true);
   }
 }
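
The barrier placement above for a volatile static store, kStoreStore before the store and kStoreLoad after it, has a rough C++11 analogy. The fences below are stand-ins for GenMemBarrier and are stronger than strictly necessary, so treat this as an illustration rather than what the backend emits.

#include <atomic>
#include <cstdint>

void VolatileStoreAnalogy(std::atomic<int32_t>& field, int32_t value) {
  std::atomic_thread_fence(std::memory_order_release);  // ~ kStoreStore
  field.store(value, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);  // ~ kStoreLoad
}
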
 
-void Codegen::GenSget(CompilationUnit* cu, uint32_t field_idx, RegLocation rl_dest,
+void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
                       bool is_long_or_double, bool is_object)
 {
   int field_offset;
   int ssb_index;
   bool is_volatile;
   bool is_referrers_class;
-  bool fast_path = cu->compiler_driver->ComputeStaticFieldInfo(
-      field_idx, cu->mir_graph->GetCurrentDexCompilationUnit(), field_offset, ssb_index,
+  bool fast_path = cu_->compiler_driver->ComputeStaticFieldInfo(
+      field_idx, mir_graph_->GetCurrentDexCompilationUnit(), field_offset, ssb_index,
       is_referrers_class, is_volatile, false);
   if (fast_path && !SLOW_FIELD_PATH) {
     DCHECK_GE(field_offset, 0);
     int rBase;
     if (is_referrers_class) {
       // Fast path, static storage base is this method's class
-      RegLocation rl_method  = LoadCurrMethod(cu);
-      rBase = AllocTemp(cu);
-      LoadWordDisp(cu, rl_method.low_reg,
+      RegLocation rl_method = LoadCurrMethod();
+      rBase = AllocTemp();
+      LoadWordDisp(rl_method.low_reg,
                    mirror::AbstractMethod::DeclaringClassOffset().Int32Value(), rBase);
     } else {
       // Medium path, static storage base in a different class which requires checks that the other
@@ -458,128 +449,127 @@
       // TODO: remove initialized check now that we are initializing classes in the compiler driver.
       DCHECK_GE(ssb_index, 0);
       // May do runtime call so everything to home locations.
-      FlushAllRegs(cu);
+      FlushAllRegs();
       // Using fixed register to sync with possible call to runtime support.
       int r_method = TargetReg(kArg1);
-      LockTemp(cu, r_method);
-      LoadCurrMethodDirect(cu, r_method);
+      LockTemp(r_method);
+      LoadCurrMethodDirect(r_method);
       rBase = TargetReg(kArg0);
-      LockTemp(cu, rBase);
-      LoadWordDisp(cu, r_method,
+      LockTemp(rBase);
+      LoadWordDisp(r_method,
                    mirror::AbstractMethod::DexCacheInitializedStaticStorageOffset().Int32Value(),
                    rBase);
-      LoadWordDisp(cu, rBase,
-                   mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
+      LoadWordDisp(rBase, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
                    sizeof(int32_t*) * ssb_index, rBase);
       // rBase now points at appropriate static storage base (Class*)
       // or NULL if not initialized. Check for NULL and call helper if NULL.
       // TUNING: fast path should fall through
-      LIR* branch_over = OpCmpImmBranch(cu, kCondNe, rBase, 0, NULL);
-      CallRuntimeHelperImm(cu, ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
-      if (cu->instruction_set == kMips) {
+      LIR* branch_over = OpCmpImmBranch(kCondNe, rBase, 0, NULL);
+      CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
+      if (cu_->instruction_set == kMips) {
        // For Arm, kRet0 = kArg0 = rBase; for Mips, we need to copy.
-        OpRegCopy(cu, rBase, TargetReg(kRet0));
+        OpRegCopy(rBase, TargetReg(kRet0));
       }
-      LIR* skip_target = NewLIR0(cu, kPseudoTargetLabel);
+      LIR* skip_target = NewLIR0(kPseudoTargetLabel);
       branch_over->target = skip_target;
-      FreeTemp(cu, r_method);
+      FreeTemp(r_method);
     }
     // rBase now holds static storage base
-    RegLocation rl_result = EvalLoc(cu, rl_dest, kAnyReg, true);
+    RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
     if (is_volatile) {
-      GenMemBarrier(cu, kLoadLoad);
+      GenMemBarrier(kLoadLoad);
     }
     if (is_long_or_double) {
-      LoadBaseDispWide(cu, rBase, field_offset, rl_result.low_reg,
+      LoadBaseDispWide(rBase, field_offset, rl_result.low_reg,
                        rl_result.high_reg, INVALID_SREG);
     } else {
-      LoadWordDisp(cu, rBase, field_offset, rl_result.low_reg);
+      LoadWordDisp(rBase, field_offset, rl_result.low_reg);
     }
-    FreeTemp(cu, rBase);
+    FreeTemp(rBase);
     if (is_long_or_double) {
-      StoreValueWide(cu, rl_dest, rl_result);
+      StoreValueWide(rl_dest, rl_result);
     } else {
-      StoreValue(cu, rl_dest, rl_result);
+      StoreValue(rl_dest, rl_result);
     }
   } else {
-    FlushAllRegs(cu);  // Everything to home locations
+    FlushAllRegs();  // Everything to home locations
     int getterOffset = is_long_or_double ? ENTRYPOINT_OFFSET(pGet64Static) :
         (is_object ? ENTRYPOINT_OFFSET(pGetObjStatic)
         : ENTRYPOINT_OFFSET(pGet32Static));
-    CallRuntimeHelperImm(cu, getterOffset, field_idx, true);
+    CallRuntimeHelperImm(getterOffset, field_idx, true);
     if (is_long_or_double) {
-      RegLocation rl_result = GetReturnWide(cu, rl_dest.fp);
-      StoreValueWide(cu, rl_dest, rl_result);
+      RegLocation rl_result = GetReturnWide(rl_dest.fp);
+      StoreValueWide(rl_dest, rl_result);
     } else {
-      RegLocation rl_result = GetReturn(cu, rl_dest.fp);
-      StoreValue(cu, rl_dest, rl_result);
+      RegLocation rl_result = GetReturn(rl_dest.fp);
+      StoreValue(rl_dest, rl_result);
     }
   }
 }
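
GenSput and GenSget share one decision ladder, chosen at compile time from what ComputeStaticFieldInfo reports. A compilable sketch of just the control structure; the booleans and messages are descriptive stand-ins.

#include <cstdio>

void DescribeStaticFieldPath(bool fast_path, bool is_referrers_class) {
  if (!fast_path) {
    std::puts("slow: pGet/pSetXXStatic runtime helper does everything");
  } else if (is_referrers_class) {
    std::puts("fast: base = method->declaring_class, direct load/store");
  } else {
    std::puts("medium: base = SSB slot in the dex cache; if null, call "
              "pInitializeStaticStorage, then direct load/store");
  }
}
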
 
 
 // Debugging routine - if null target, branch to DebugMe
-void Codegen::GenShowTarget(CompilationUnit* cu)
+void Mir2Lir::GenShowTarget()
 {
-  DCHECK_NE(cu->instruction_set, kX86) << "unimplemented GenShowTarget";
-  LIR* branch_over = OpCmpImmBranch(cu, kCondNe, TargetReg(kInvokeTgt), 0, NULL);
-  LoadWordDisp(cu, TargetReg(kSelf), ENTRYPOINT_OFFSET(pDebugMe), TargetReg(kInvokeTgt));
-  LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+  DCHECK_NE(cu_->instruction_set, kX86) << "unimplemented GenShowTarget";
+  LIR* branch_over = OpCmpImmBranch(kCondNe, TargetReg(kInvokeTgt), 0, NULL);
+  LoadWordDisp(TargetReg(kSelf), ENTRYPOINT_OFFSET(pDebugMe), TargetReg(kInvokeTgt));
+  LIR* target = NewLIR0(kPseudoTargetLabel);
   branch_over->target = target;
 }
 
-void Codegen::HandleSuspendLaunchPads(CompilationUnit *cu)
+void Mir2Lir::HandleSuspendLaunchPads()
 {
-  LIR** suspend_label = reinterpret_cast<LIR**>(cu->suspend_launchpads.elem_list);
-  int num_elems = cu->suspend_launchpads.num_used;
+  LIR** suspend_label = reinterpret_cast<LIR**>(suspend_launchpads_.elem_list);
+  int num_elems = suspend_launchpads_.num_used;
   int helper_offset = ENTRYPOINT_OFFSET(pTestSuspendFromCode);
   for (int i = 0; i < num_elems; i++) {
-    ResetRegPool(cu);
-    ResetDefTracking(cu);
+    ResetRegPool();
+    ResetDefTracking();
     LIR* lab = suspend_label[i];
     LIR* resume_lab = reinterpret_cast<LIR*>(lab->operands[0]);
-    cu->current_dalvik_offset = lab->operands[1];
-    AppendLIR(cu, lab);
-    int r_tgt = CallHelperSetup(cu, helper_offset);
-    CallHelper(cu, r_tgt, helper_offset, true /* MarkSafepointPC */);
-    OpUnconditionalBranch(cu, resume_lab);
+    current_dalvik_offset_ = lab->operands[1];
+    AppendLIR(lab);
+    int r_tgt = CallHelperSetup(helper_offset);
+    CallHelper(r_tgt, helper_offset, true /* MarkSafepointPC */);
+    OpUnconditionalBranch(resume_lab);
   }
 }
 
-void Codegen::HandleIntrinsicLaunchPads(CompilationUnit *cu)
+void Mir2Lir::HandleIntrinsicLaunchPads()
 {
-  LIR** intrinsic_label = reinterpret_cast<LIR**>(cu->intrinsic_launchpads.elem_list);
-  int num_elems = cu->intrinsic_launchpads.num_used;
+  LIR** intrinsic_label = reinterpret_cast<LIR**>(intrinsic_launchpads_.elem_list);
+  int num_elems = intrinsic_launchpads_.num_used;
   for (int i = 0; i < num_elems; i++) {
-    ResetRegPool(cu);
-    ResetDefTracking(cu);
+    ResetRegPool();
+    ResetDefTracking();
     LIR* lab = intrinsic_label[i];
     CallInfo* info = reinterpret_cast<CallInfo*>(lab->operands[0]);
-    cu->current_dalvik_offset = info->offset;
-    AppendLIR(cu, lab);
+    current_dalvik_offset_ = info->offset;
+    AppendLIR(lab);
     // NOTE: GenInvoke handles MarkSafepointPC
-    GenInvoke(cu, info);
+    GenInvoke(info);
     LIR* resume_lab = reinterpret_cast<LIR*>(lab->operands[2]);
     if (resume_lab != NULL) {
-      OpUnconditionalBranch(cu, resume_lab);
+      OpUnconditionalBranch(resume_lab);
     }
   }
 }
 
-void Codegen::HandleThrowLaunchPads(CompilationUnit *cu)
+void Mir2Lir::HandleThrowLaunchPads()
 {
-  LIR** throw_label = reinterpret_cast<LIR**>(cu->throw_launchpads.elem_list);
-  int num_elems = cu->throw_launchpads.num_used;
+  LIR** throw_label = reinterpret_cast<LIR**>(throw_launchpads_.elem_list);
+  int num_elems = throw_launchpads_.num_used;
   for (int i = 0; i < num_elems; i++) {
-    ResetRegPool(cu);
-    ResetDefTracking(cu);
+    ResetRegPool();
+    ResetDefTracking();
     LIR* lab = throw_label[i];
-    cu->current_dalvik_offset = lab->operands[1];
-    AppendLIR(cu, lab);
+    current_dalvik_offset_ = lab->operands[1];
+    AppendLIR(lab);
     int func_offset = 0;
     int v1 = lab->operands[2];
     int v2 = lab->operands[3];
-    bool target_x86 = (cu->instruction_set == kX86);
+    bool target_x86 = (cu_->instruction_set == kX86);
     switch (lab->operands[0]) {
       case kThrowNullPointer:
         func_offset = ENTRYPOINT_OFFSET(pThrowNullPointerFromCode);
@@ -587,44 +577,44 @@
       case kThrowConstantArrayBounds: // v1 is length reg (for Arm/Mips), v2 constant index
        // v2 holds the constant index; Arm/Mips pass the length in v1, x86 passes the array in v1 and reloads the length.
         if (target_x86) {
-          OpRegMem(cu, kOpMov, TargetReg(kArg1), v1, mirror::Array::LengthOffset().Int32Value());
+          OpRegMem(kOpMov, TargetReg(kArg1), v1, mirror::Array::LengthOffset().Int32Value());
         } else {
-          OpRegCopy(cu, TargetReg(kArg1), v1);
+          OpRegCopy(TargetReg(kArg1), v1);
         }
         // Make sure the following LoadConstant doesn't mess with kArg1.
-        LockTemp(cu, TargetReg(kArg1));
-        LoadConstant(cu, TargetReg(kArg0), v2);
+        LockTemp(TargetReg(kArg1));
+        LoadConstant(TargetReg(kArg0), v2);
         func_offset = ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode);
         break;
       case kThrowArrayBounds:
         // Move v1 (array index) to kArg0 and v2 (array length) to kArg1
         if (v2 != TargetReg(kArg0)) {
-          OpRegCopy(cu, TargetReg(kArg0), v1);
+          OpRegCopy(TargetReg(kArg0), v1);
           if (target_x86) {
             // x86 leaves the array pointer in v2, so load the array length that the handler expects
-            OpRegMem(cu, kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
+            OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
           } else {
-            OpRegCopy(cu, TargetReg(kArg1), v2);
+            OpRegCopy(TargetReg(kArg1), v2);
           }
         } else {
           if (v1 == TargetReg(kArg1)) {
             // Swap v1 and v2, using kArg2 as a temp
-            OpRegCopy(cu, TargetReg(kArg2), v1);
+            OpRegCopy(TargetReg(kArg2), v1);
             if (target_x86) {
               // x86 leaves the array pointer in v2; load the array length that the handler expects
-              OpRegMem(cu, kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
+              OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
             } else {
-              OpRegCopy(cu, TargetReg(kArg1), v2);
+              OpRegCopy(TargetReg(kArg1), v2);
             }
-            OpRegCopy(cu, TargetReg(kArg0), TargetReg(kArg2));
+            OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
           } else {
             if (target_x86) {
               // x86 leaves the array pointer in v2; load the array length that the handler expects
-              OpRegMem(cu, kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
+              OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
             } else {
-              OpRegCopy(cu, TargetReg(kArg1), v2);
+              OpRegCopy(TargetReg(kArg1), v2);
             }
-            OpRegCopy(cu, TargetReg(kArg0), v1);
+            OpRegCopy(TargetReg(kArg0), v1);
           }
         }
         func_offset = ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode);
@@ -633,7 +623,7 @@
         func_offset = ENTRYPOINT_OFFSET(pThrowDivZeroFromCode);
         break;
       case kThrowNoSuchMethod:
-        OpRegCopy(cu, TargetReg(kArg0), v1);
+        OpRegCopy(TargetReg(kArg0), v1);
         func_offset =
           ENTRYPOINT_OFFSET(pThrowNoSuchMethodFromCode);
         break;
@@ -641,239 +631,239 @@
         func_offset = ENTRYPOINT_OFFSET(pThrowStackOverflowFromCode);
         // Restore stack alignment
         if (target_x86) {
-          OpRegImm(cu, kOpAdd, TargetReg(kSp), cu->frame_size);
+          OpRegImm(kOpAdd, TargetReg(kSp), frame_size_);
         } else {
-          OpRegImm(cu, kOpAdd, TargetReg(kSp), (cu->num_core_spills + cu->num_fp_spills) * 4);
+          OpRegImm(kOpAdd, TargetReg(kSp), (num_core_spills_ + num_fp_spills_) * 4);
         }
         break;
       default:
         LOG(FATAL) << "Unexpected throw kind: " << lab->operands[0];
     }
-    ClobberCalleeSave(cu);
-    int r_tgt = CallHelperSetup(cu, func_offset);
-    CallHelper(cu, r_tgt, func_offset, true /* MarkSafepointPC */);
+    ClobberCalleeSave();
+    int r_tgt = CallHelperSetup(func_offset);
+    CallHelper(r_tgt, func_offset, true /* MarkSafepointPC */);
   }
 }
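
The three Handle*LaunchPads methods above share one idiom: the main codegen pass records a label plus operands for each slow case, and this late pass walks the list, resets register-tracking state (pads are branch targets, so no live values survive into them), restores the Dalvik PC so the GC maps stay accurate, and emits the out-of-line call. A minimal sketch with invented types:

#include <cstdint>
#include <cstdio>
#include <vector>

// Invented record type; the real code packs these into LIR operands.
struct LaunchPad {
  uint32_t dalvik_offset;  // lab->operands[1] above
  int helper;              // which runtime entrypoint to call
};

void MaterializePads(const std::vector<LaunchPad>& pads) {
  for (const LaunchPad& pad : pads) {
    // Corresponds to ResetRegPool()/ResetDefTracking(): each pad starts
    // from a clean register state, then emits its helper call.
    std::printf("pad @0x%x -> helper %d\n",
                static_cast<unsigned>(pad.dalvik_offset), pad.helper);
  }
}
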
 
-void Codegen::GenIGet(CompilationUnit* cu, uint32_t field_idx, int opt_flags, OpSize size,
+void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
                       RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double,
                       bool is_object)
 {
   int field_offset;
   bool is_volatile;
 
-  bool fast_path = FastInstance(cu, field_idx, field_offset, is_volatile, false);
+  bool fast_path = FastInstance(field_idx, field_offset, is_volatile, false);
 
   if (fast_path && !SLOW_FIELD_PATH) {
     RegLocation rl_result;
     RegisterClass reg_class = oat_reg_class_by_size(size);
     DCHECK_GE(field_offset, 0);
-    rl_obj = LoadValue(cu, rl_obj, kCoreReg);
+    rl_obj = LoadValue(rl_obj, kCoreReg);
     if (is_long_or_double) {
       DCHECK(rl_dest.wide);
-      GenNullCheck(cu, rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
-      if (cu->instruction_set == kX86) {
-        rl_result = EvalLoc(cu, rl_dest, reg_class, true);
-        GenNullCheck(cu, rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
-        LoadBaseDispWide(cu, rl_obj.low_reg, field_offset, rl_result.low_reg,
+      GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
+      if (cu_->instruction_set == kX86) {
+        rl_result = EvalLoc(rl_dest, reg_class, true);
+        GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
+        LoadBaseDispWide(rl_obj.low_reg, field_offset, rl_result.low_reg,
                          rl_result.high_reg, rl_obj.s_reg_low);
         if (is_volatile) {
-          GenMemBarrier(cu, kLoadLoad);
+          GenMemBarrier(kLoadLoad);
         }
       } else {
-        int reg_ptr = AllocTemp(cu);
-        OpRegRegImm(cu, kOpAdd, reg_ptr, rl_obj.low_reg, field_offset);
-        rl_result = EvalLoc(cu, rl_dest, reg_class, true);
-        LoadBaseDispWide(cu, reg_ptr, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
+        int reg_ptr = AllocTemp();
+        OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_offset);
+        rl_result = EvalLoc(rl_dest, reg_class, true);
+        LoadBaseDispWide(reg_ptr, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
         if (is_volatile) {
-          GenMemBarrier(cu, kLoadLoad);
+          GenMemBarrier(kLoadLoad);
         }
-        FreeTemp(cu, reg_ptr);
+        FreeTemp(reg_ptr);
       }
-      StoreValueWide(cu, rl_dest, rl_result);
+      StoreValueWide(rl_dest, rl_result);
     } else {
-      rl_result = EvalLoc(cu, rl_dest, reg_class, true);
-      GenNullCheck(cu, rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
-      LoadBaseDisp(cu, rl_obj.low_reg, field_offset, rl_result.low_reg,
+      rl_result = EvalLoc(rl_dest, reg_class, true);
+      GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
+      LoadBaseDisp(rl_obj.low_reg, field_offset, rl_result.low_reg,
                    kWord, rl_obj.s_reg_low);
       if (is_volatile) {
-        GenMemBarrier(cu, kLoadLoad);
+        GenMemBarrier(kLoadLoad);
       }
-      StoreValue(cu, rl_dest, rl_result);
+      StoreValue(rl_dest, rl_result);
     }
   } else {
     int getterOffset = is_long_or_double ? ENTRYPOINT_OFFSET(pGet64Instance) :
         (is_object ? ENTRYPOINT_OFFSET(pGetObjInstance)
         : ENTRYPOINT_OFFSET(pGet32Instance));
-    CallRuntimeHelperImmRegLocation(cu, getterOffset, field_idx, rl_obj, true);
+    CallRuntimeHelperImmRegLocation(getterOffset, field_idx, rl_obj, true);
     if (is_long_or_double) {
-      RegLocation rl_result = GetReturnWide(cu, rl_dest.fp);
-      StoreValueWide(cu, rl_dest, rl_result);
+      RegLocation rl_result = GetReturnWide(rl_dest.fp);
+      StoreValueWide(rl_dest, rl_result);
     } else {
-      RegLocation rl_result = GetReturn(cu, rl_dest.fp);
-      StoreValue(cu, rl_dest, rl_result);
+      RegLocation rl_result = GetReturn(rl_dest.fp);
+      StoreValue(rl_dest, rl_result);
     }
   }
 }
 
-void Codegen::GenIPut(CompilationUnit* cu, uint32_t field_idx, int opt_flags, OpSize size,
+void Mir2Lir::GenIPut(uint32_t field_idx, int opt_flags, OpSize size,
                       RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double,
                       bool is_object)
 {
   int field_offset;
   bool is_volatile;
 
-  bool fast_path = FastInstance(cu, field_idx, field_offset, is_volatile,
-                 true);
+  bool fast_path = FastInstance(field_idx, field_offset, is_volatile, true);
   if (fast_path && !SLOW_FIELD_PATH) {
     RegisterClass reg_class = oat_reg_class_by_size(size);
     DCHECK_GE(field_offset, 0);
-    rl_obj = LoadValue(cu, rl_obj, kCoreReg);
+    rl_obj = LoadValue(rl_obj, kCoreReg);
     if (is_long_or_double) {
       int reg_ptr;
-      rl_src = LoadValueWide(cu, rl_src, kAnyReg);
-      GenNullCheck(cu, rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
-      reg_ptr = AllocTemp(cu);
-      OpRegRegImm(cu, kOpAdd, reg_ptr, rl_obj.low_reg, field_offset);
+      rl_src = LoadValueWide(rl_src, kAnyReg);
+      GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
+      reg_ptr = AllocTemp();
+      OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_offset);
       if (is_volatile) {
-        GenMemBarrier(cu, kStoreStore);
+        GenMemBarrier(kStoreStore);
       }
-      StoreBaseDispWide(cu, reg_ptr, 0, rl_src.low_reg, rl_src.high_reg);
+      StoreBaseDispWide(reg_ptr, 0, rl_src.low_reg, rl_src.high_reg);
       if (is_volatile) {
-        GenMemBarrier(cu, kLoadLoad);
+        GenMemBarrier(kLoadLoad);
       }
-      FreeTemp(cu, reg_ptr);
+      FreeTemp(reg_ptr);
     } else {
-      rl_src = LoadValue(cu, rl_src, reg_class);
-      GenNullCheck(cu, rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
+      rl_src = LoadValue(rl_src, reg_class);
+      GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
       if (is_volatile) {
-        GenMemBarrier(cu, kStoreStore);
+        GenMemBarrier(kStoreStore);
       }
-      StoreBaseDisp(cu, rl_obj.low_reg, field_offset, rl_src.low_reg, kWord);
+      StoreBaseDisp(rl_obj.low_reg, field_offset, rl_src.low_reg, kWord);
       if (is_volatile) {
-        GenMemBarrier(cu, kLoadLoad);
+        GenMemBarrier(kLoadLoad);
       }
-      if (is_object && !cu->mir_graph->IsConstantNullRef(rl_src)) {
-        MarkGCCard(cu, rl_src.low_reg, rl_obj.low_reg);
+      if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
+        MarkGCCard(rl_src.low_reg, rl_obj.low_reg);
       }
     }
   } else {
     int setter_offset = is_long_or_double ? ENTRYPOINT_OFFSET(pSet64Instance) :
         (is_object ? ENTRYPOINT_OFFSET(pSetObjInstance)
         : ENTRYPOINT_OFFSET(pSet32Instance));
-    CallRuntimeHelperImmRegLocationRegLocation(cu, setter_offset, field_idx, rl_obj, rl_src, true);
+    CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_idx, rl_obj, rl_src, true);
   }
 }
 
-void Codegen::GenConstClass(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest)
+void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest)
 {
-  RegLocation rl_method = LoadCurrMethod(cu);
-  int res_reg = AllocTemp(cu);
-  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-  if (!cu->compiler_driver->CanAccessTypeWithoutChecks(cu->method_idx,
-                                                   *cu->dex_file,
-                                                   type_idx)) {
+  RegLocation rl_method = LoadCurrMethod();
+  int res_reg = AllocTemp();
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
+                                                        type_idx)) {
     // Call out to helper which resolves type and verifies access.
     // Resolved type returned in kRet0.
-    CallRuntimeHelperImmReg(cu, ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
+    CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
                             type_idx, rl_method.low_reg, true);
-    RegLocation rl_result = GetReturn(cu, false);
-    StoreValue(cu, rl_dest, rl_result);
+    RegLocation rl_result = GetReturn(false);
+    StoreValue(rl_dest, rl_result);
   } else {
    // We don't need access checks, so load the type from the dex cache
     int32_t dex_cache_offset =
         mirror::AbstractMethod::DexCacheResolvedTypesOffset().Int32Value();
-    LoadWordDisp(cu, rl_method.low_reg, dex_cache_offset, res_reg);
+    LoadWordDisp(rl_method.low_reg, dex_cache_offset, res_reg);
     int32_t offset_of_type =
         mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
                           * type_idx);
-    LoadWordDisp(cu, res_reg, offset_of_type, rl_result.low_reg);
-    if (!cu->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu->dex_file,
+    LoadWordDisp(res_reg, offset_of_type, rl_result.low_reg);
+    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file,
         type_idx) || SLOW_TYPE_PATH) {
       // Slow path, at runtime test if type is null and if so initialize
-      FlushAllRegs(cu);
-      LIR* branch1 = OpCmpImmBranch(cu, kCondEq, rl_result.low_reg, 0, NULL);
+      FlushAllRegs();
+      LIR* branch1 = OpCmpImmBranch(kCondEq, rl_result.low_reg, 0, NULL);
       // Resolved, store and hop over following code
-      StoreValue(cu, rl_dest, rl_result);
+      StoreValue(rl_dest, rl_result);
       /*
        * Because we have stores of the target value on two paths,
        * clobber temp tracking for the destination using the ssa name
        */
-      ClobberSReg(cu, rl_dest.s_reg_low);
-      LIR* branch2 = OpUnconditionalBranch(cu,0);
+      ClobberSReg(rl_dest.s_reg_low);
+      LIR* branch2 = OpUnconditionalBranch(0);
       // TUNING: move slow path to end & remove unconditional branch
-      LIR* target1 = NewLIR0(cu, kPseudoTargetLabel);
+      LIR* target1 = NewLIR0(kPseudoTargetLabel);
       // Call out to helper, which will return resolved type in kArg0
-      CallRuntimeHelperImmReg(cu, ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx,
+      CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx,
                               rl_method.low_reg, true);
-      RegLocation rl_result = GetReturn(cu, false);
-      StoreValue(cu, rl_dest, rl_result);
+      RegLocation rl_result = GetReturn(false);
+      StoreValue(rl_dest, rl_result);
       /*
        * Because we have stores of the target value on two paths,
        * clobber temp tracking for the destination using the ssa name
        */
-      ClobberSReg(cu, rl_dest.s_reg_low);
+      ClobberSReg(rl_dest.s_reg_low);
       // Rejoin code paths
-      LIR* target2 = NewLIR0(cu, kPseudoTargetLabel);
+      LIR* target2 = NewLIR0(kPseudoTargetLabel);
       branch1->target = target1;
       branch2->target = target2;
     } else {
       // Fast path, we're done - just store result
-      StoreValue(cu, rl_dest, rl_result);
+      StoreValue(rl_dest, rl_result);
     }
   }
 }
 
-void Codegen::GenConstString(CompilationUnit* cu, uint32_t string_idx, RegLocation rl_dest)
+void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest)
 {
   /* NOTE: Most strings should be available at compile time */
   int32_t offset_of_string = mirror::Array::DataOffset(sizeof(mirror::String*)).Int32Value() +
                  (sizeof(mirror::String*) * string_idx);
-  if (!cu->compiler_driver->CanAssumeStringIsPresentInDexCache(
-      *cu->dex_file, string_idx) || SLOW_STRING_PATH) {
+  if (!cu_->compiler_driver->CanAssumeStringIsPresentInDexCache(
+      *cu_->dex_file, string_idx) || SLOW_STRING_PATH) {
     // slow path, resolve string if not in dex cache
-    FlushAllRegs(cu);
-    LockCallTemps(cu); // Using explicit registers
-    LoadCurrMethodDirect(cu, TargetReg(kArg2));
-    LoadWordDisp(cu, TargetReg(kArg2),
+    FlushAllRegs();
+    LockCallTemps(); // Using explicit registers
+    LoadCurrMethodDirect(TargetReg(kArg2));
+    LoadWordDisp(TargetReg(kArg2),
                  mirror::AbstractMethod::DexCacheStringsOffset().Int32Value(), TargetReg(kArg0));
     // Might call out to helper, which will return resolved string in kRet0
-    int r_tgt = CallHelperSetup(cu, ENTRYPOINT_OFFSET(pResolveStringFromCode));
-    LoadWordDisp(cu, TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
-    LoadConstant(cu, TargetReg(kArg1), string_idx);
-    if (cu->instruction_set == kThumb2) {
-      OpRegImm(cu, kOpCmp, TargetReg(kRet0), 0);  // Is resolved?
-      GenBarrier(cu);
+    int r_tgt = CallHelperSetup(ENTRYPOINT_OFFSET(pResolveStringFromCode));
+    LoadWordDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
+    LoadConstant(TargetReg(kArg1), string_idx);
+    if (cu_->instruction_set == kThumb2) {
+      OpRegImm(kOpCmp, TargetReg(kRet0), 0);  // Is resolved?
+      GenBarrier();
       // For testing, always force through helper
       if (!EXERCISE_SLOWEST_STRING_PATH) {
-        OpIT(cu, kCondEq, "T");
+        OpIT(kCondEq, "T");
       }
-      OpRegCopy(cu, TargetReg(kArg0), TargetReg(kArg2));   // .eq
-      LIR* call_inst = OpReg(cu, kOpBlx, r_tgt);    // .eq, helper(Method*, string_idx)
-      MarkSafepointPC(cu, call_inst);
-      FreeTemp(cu, r_tgt);
-    } else if (cu->instruction_set == kMips) {
-      LIR* branch = OpCmpImmBranch(cu, kCondNe, TargetReg(kRet0), 0, NULL);
-      OpRegCopy(cu, TargetReg(kArg0), TargetReg(kArg2));   // .eq
-      LIR* call_inst = OpReg(cu, kOpBlx, r_tgt);
-      MarkSafepointPC(cu, call_inst);
-      FreeTemp(cu, r_tgt);
-      LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));   // .eq
+      LIR* call_inst = OpReg(kOpBlx, r_tgt);    // .eq, helper(Method*, string_idx)
+      MarkSafepointPC(call_inst);
+      FreeTemp(r_tgt);
+    } else if (cu_->instruction_set == kMips) {
+      LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kRet0), 0, NULL);
+      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));   // .eq
+      LIR* call_inst = OpReg(kOpBlx, r_tgt);
+      MarkSafepointPC(call_inst);
+      FreeTemp(r_tgt);
+      LIR* target = NewLIR0(kPseudoTargetLabel);
       branch->target = target;
     } else {
-      DCHECK_EQ(cu->instruction_set, kX86);
-      CallRuntimeHelperRegReg(cu, ENTRYPOINT_OFFSET(pResolveStringFromCode), TargetReg(kArg2), TargetReg(kArg1), true);
+      DCHECK_EQ(cu_->instruction_set, kX86);
+      CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pResolveStringFromCode), TargetReg(kArg2),
+                              TargetReg(kArg1), true);
     }
-    GenBarrier(cu);
-    StoreValue(cu, rl_dest, GetReturn(cu, false));
+    GenBarrier();
+    StoreValue(rl_dest, GetReturn(false));
   } else {
-    RegLocation rl_method = LoadCurrMethod(cu);
-    int res_reg = AllocTemp(cu);
-    RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-    LoadWordDisp(cu, rl_method.low_reg,
+    RegLocation rl_method = LoadCurrMethod();
+    int res_reg = AllocTemp();
+    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+    LoadWordDisp(rl_method.low_reg,
                  mirror::AbstractMethod::DexCacheStringsOffset().Int32Value(), res_reg);
-    LoadWordDisp(cu, res_reg, offset_of_string, rl_result.low_reg);
-    StoreValue(cu, rl_dest, rl_result);
+    LoadWordDisp(res_reg, offset_of_string, rl_result.low_reg);
+    StoreValue(rl_dest, rl_result);
   }
 }
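
The offset_of_string and offset_of_type arithmetic used by GenConstString and GenConstClass indexes an object array: the payload begins at the data offset and entry i lies i reference slots further on. A sketch with stand-in constants; the real values come from mirror::Array::DataOffset and the target's reference width.

#include <cstdint>

// Stand-in constants; the real values come from mirror::Array::DataOffset()
// and sizeof(mirror::String*) / sizeof(mirror::Class*) on the target.
constexpr int32_t kDataOffsetSketch = 12;
constexpr int32_t kRefSlotSketch = 4;

constexpr int32_t DexCacheEntryOffset(uint32_t idx) {
  return kDataOffsetSketch + kRefSlotSketch * static_cast<int32_t>(idx);
}

static_assert(DexCacheEntryOffset(3) == 24,
              "entry 3 sits three reference slots past the payload start");
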
 
@@ -881,188 +871,188 @@
  * Let helper function take care of everything.  Will
  * call Class::NewInstanceFromCode(type_idx, method);
  */
-void Codegen::GenNewInstance(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest)
+void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest)
 {
-  FlushAllRegs(cu);  /* Everything to home location */
+  FlushAllRegs();  /* Everything to home location */
  // Alloc will always check for resolution; do we also need to verify access
  // because the verifier was unable to?
   int func_offset;
-  if (cu->compiler_driver->CanAccessInstantiableTypeWithoutChecks(
-      cu->method_idx, *cu->dex_file, type_idx)) {
+  if (cu_->compiler_driver->CanAccessInstantiableTypeWithoutChecks(
+      cu_->method_idx, *cu_->dex_file, type_idx)) {
     func_offset = ENTRYPOINT_OFFSET(pAllocObjectFromCode);
   } else {
     func_offset = ENTRYPOINT_OFFSET(pAllocObjectFromCodeWithAccessCheck);
   }
-  CallRuntimeHelperImmMethod(cu, func_offset, type_idx, true);
-  RegLocation rl_result = GetReturn(cu, false);
-  StoreValue(cu, rl_dest, rl_result);
+  CallRuntimeHelperImmMethod(func_offset, type_idx, true);
+  RegLocation rl_result = GetReturn(false);
+  StoreValue(rl_dest, rl_result);
 }
 
-void Codegen::GenThrow(CompilationUnit* cu, RegLocation rl_src)
+void Mir2Lir::GenThrow(RegLocation rl_src)
 {
-  FlushAllRegs(cu);
-  CallRuntimeHelperRegLocation(cu, ENTRYPOINT_OFFSET(pDeliverException), rl_src, true);
+  FlushAllRegs();
+  CallRuntimeHelperRegLocation(ENTRYPOINT_OFFSET(pDeliverException), rl_src, true);
 }
 
-void Codegen::GenInstanceof(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest,
+void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest,
                             RegLocation rl_src)
 {
-  FlushAllRegs(cu);
+  FlushAllRegs();
   // May generate a call - use explicit registers
-  LockCallTemps(cu);
-  LoadCurrMethodDirect(cu, TargetReg(kArg1));  // kArg1 <= current Method*
+  LockCallTemps();
+  LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 <= current Method*
   int class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
-  if (!cu->compiler_driver->CanAccessTypeWithoutChecks(cu->method_idx,
-                                                   *cu->dex_file,
-                                                   type_idx)) {
+  if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
+                                                        type_idx)) {
     // Check we have access to type_idx and if not throw IllegalAccessError,
     // returns Class* in kArg0
-    CallRuntimeHelperImm(cu, ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
+    CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
                          type_idx, true);
-    OpRegCopy(cu, class_reg, TargetReg(kRet0));  // Align usage with fast path
-    LoadValueDirectFixed(cu, rl_src, TargetReg(kArg0));  // kArg0 <= ref
+    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
+    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
   } else {
     // Load dex cache entry into class_reg (kArg2)
-    LoadValueDirectFixed(cu, rl_src, TargetReg(kArg0));  // kArg0 <= ref
-    LoadWordDisp(cu, TargetReg(kArg1),
+    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
+    LoadWordDisp(TargetReg(kArg1),
                  mirror::AbstractMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
     int32_t offset_of_type =
         mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
         * type_idx);
-    LoadWordDisp(cu, class_reg, offset_of_type, class_reg);
-    if (!cu->compiler_driver->CanAssumeTypeIsPresentInDexCache(
-        *cu->dex_file, type_idx)) {
+    LoadWordDisp(class_reg, offset_of_type, class_reg);
+    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(
+        *cu_->dex_file, type_idx)) {
       // Need to test presence of type in dex cache at runtime
-      LIR* hop_branch = OpCmpImmBranch(cu, kCondNe, class_reg, 0, NULL);
+      LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
       // Not resolved
       // Call out to helper, which will return resolved type in kRet0
-      CallRuntimeHelperImm(cu, ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, true);
-      OpRegCopy(cu, TargetReg(kArg2), TargetReg(kRet0)); // Align usage with fast path
-      LoadValueDirectFixed(cu, rl_src, TargetReg(kArg0));  /* reload Ref */
+      CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, true);
+      OpRegCopy(TargetReg(kArg2), TargetReg(kRet0)); // Align usage with fast path
+      LoadValueDirectFixed(rl_src, TargetReg(kArg0));  /* reload Ref */
       // Rejoin code paths
-      LIR* hop_target = NewLIR0(cu, kPseudoTargetLabel);
+      LIR* hop_target = NewLIR0(kPseudoTargetLabel);
       hop_branch->target = hop_target;
     }
   }
   /* kArg0 is ref, kArg2 is class. If ref==null, use directly as bool result */
-  RegLocation rl_result = GetReturn(cu, false);
-  if (cu->instruction_set == kMips) {
-    LoadConstant(cu, rl_result.low_reg, 0);  // store false result for if branch is taken
+  RegLocation rl_result = GetReturn(false);
+  if (cu_->instruction_set == kMips) {
+    LoadConstant(rl_result.low_reg, 0);  // store false result in case the branch is taken
   }
-  LIR* branch1 = OpCmpImmBranch(cu, kCondEq, TargetReg(kArg0), 0, NULL);
+  LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
   /* load object->klass_ */
   DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
-  LoadWordDisp(cu, TargetReg(kArg0),  mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
+  LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
   /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
   LIR* call_inst;
   LIR* branchover = NULL;
-  if (cu->instruction_set == kThumb2) {
+  if (cu_->instruction_set == kThumb2) {
     /* Uses conditional nullification */
-    int r_tgt = LoadHelper(cu, ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
-    OpRegReg(cu, kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
-    OpIT(cu, kCondEq, "EE");   // if-convert the test
-    LoadConstant(cu, TargetReg(kArg0), 1);     // .eq case - load true
-    OpRegCopy(cu, TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
-    call_inst = OpReg(cu, kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
-    FreeTemp(cu, r_tgt);
+    int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+    OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
+    OpIT(kCondEq, "EE");   // if-convert the test
+    LoadConstant(TargetReg(kArg0), 1);     // .eq case - load true
+    OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
+    call_inst = OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
+    FreeTemp(r_tgt);
   } else {
     /* Uses branchovers */
-    LoadConstant(cu, rl_result.low_reg, 1);     // assume true
-    branchover = OpCmpBranch(cu, kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
-    if (cu->instruction_set != kX86) {
-      int r_tgt = LoadHelper(cu, ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
-      OpRegCopy(cu, TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
-      call_inst = OpReg(cu, kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
-      FreeTemp(cu, r_tgt);
+    LoadConstant(rl_result.low_reg, 1);     // assume true
+    branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
+    if (cu_->instruction_set != kX86) {
+      int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
+      call_inst = OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
+      FreeTemp(r_tgt);
     } else {
-      OpRegCopy(cu, TargetReg(kArg0), TargetReg(kArg2));
-      call_inst = OpThreadMem(cu, kOpBlx, ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
+      call_inst = OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
     }
   }
-  MarkSafepointPC(cu, call_inst);
-  ClobberCalleeSave(cu);
+  MarkSafepointPC(call_inst);
+  ClobberCalleeSave();
   /* branch targets here */
-  LIR* target = NewLIR0(cu, kPseudoTargetLabel);
-  StoreValue(cu, rl_dest, rl_result);
+  LIR* target = NewLIR0(kPseudoTargetLabel);
+  StoreValue(rl_dest, rl_result);
   branch1->target = target;
-  if (cu->instruction_set != kThumb2) {
+  if (cu_->instruction_set != kThumb2) {
     branchover->target = target;
   }
 }
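
Stripped of the register choreography, the instanceof sequence above reduces to three cases, and only the last one pays for a runtime call. A compilable sketch in which ObjSketch and the non_trivial callback stand in for object->klass_ and pInstanceofNonTrivialFromCode:

// ObjSketch and non_trivial are stand-ins invented for this sketch.
struct ObjSketch {
  const void* klass;
};

bool InstanceofSketch(const ObjSketch* ref, const void* klass,
                      bool (*non_trivial)(const void*, const void*)) {
  if (ref == nullptr) {
    return false;               // branch1: a null ref is never an instance
  }
  if (ref->klass == klass) {
    return true;                // trivial hit, no call
  }
  return non_trivial(klass, ref->klass);  // helper(class, ref->klass_)
}
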
 
-void Codegen::GenCheckCast(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_src)
+void Mir2Lir::GenCheckCast(uint32_t type_idx, RegLocation rl_src)
 {
-  FlushAllRegs(cu);
+  FlushAllRegs();
   // May generate a call - use explicit registers
-  LockCallTemps(cu);
-  LoadCurrMethodDirect(cu, TargetReg(kArg1));  // kArg1 <= current Method*
+  LockCallTemps();
+  LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 <= current Method*
   int class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
-  if (!cu->compiler_driver->CanAccessTypeWithoutChecks(cu->method_idx,
-                                                   *cu->dex_file,
-                                                   type_idx)) {
+  if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
+                                                        type_idx)) {
     // Check we have access to type_idx and if not throw IllegalAccessError,
     // returns Class* in kRet0
     // InitializeTypeAndVerifyAccess(idx, method)
-    CallRuntimeHelperImmReg(cu, ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
+    CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
                             type_idx, TargetReg(kArg1), true);
-    OpRegCopy(cu, class_reg, TargetReg(kRet0));  // Align usage with fast path
+    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
   } else {
     // Load dex cache entry into class_reg (kArg2)
-    LoadWordDisp(cu, TargetReg(kArg1),
+    LoadWordDisp(TargetReg(kArg1),
                  mirror::AbstractMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
     int32_t offset_of_type =
         mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
         (sizeof(mirror::Class*) * type_idx);
-    LoadWordDisp(cu, class_reg, offset_of_type, class_reg);
-    if (!cu->compiler_driver->CanAssumeTypeIsPresentInDexCache(
-        *cu->dex_file, type_idx)) {
+    LoadWordDisp(class_reg, offset_of_type, class_reg);
+    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(
+        *cu_->dex_file, type_idx)) {
       // Need to test presence of type in dex cache at runtime
-      LIR* hop_branch = OpCmpImmBranch(cu, kCondNe, class_reg, 0, NULL);
+      LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
       // Not resolved
       // Call out to helper, which will return resolved type in kArg0
       // InitializeTypeFromCode(idx, method)
-      CallRuntimeHelperImmReg(cu, ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, TargetReg(kArg1),
+      CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, TargetReg(kArg1),
                               true);
-      OpRegCopy(cu, class_reg, TargetReg(kRet0)); // Align usage with fast path
+      OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path
       // Rejoin code paths
-      LIR* hop_target = NewLIR0(cu, kPseudoTargetLabel);
+      LIR* hop_target = NewLIR0(kPseudoTargetLabel);
       hop_branch->target = hop_target;
     }
   }
   // At this point, class_reg (kArg2) has class
-  LoadValueDirectFixed(cu, rl_src, TargetReg(kArg0));  // kArg0 <= ref
+  LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
   /* Null is OK - continue */
-  LIR* branch1 = OpCmpImmBranch(cu, kCondEq, TargetReg(kArg0), 0, NULL);
+  LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
   /* load object->klass_ */
   DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
-  LoadWordDisp(cu, TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
+  LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
   /* kArg1 now contains object->klass_ */
   LIR* branch2;
-  if (cu->instruction_set == kThumb2) {
-    int r_tgt = LoadHelper(cu, ENTRYPOINT_OFFSET(pCheckCastFromCode));
-    OpRegReg(cu, kOpCmp, TargetReg(kArg1), class_reg);
-    branch2 = OpCondBranch(cu, kCondEq, NULL); /* If eq, trivial yes */
-    OpRegCopy(cu, TargetReg(kArg0), TargetReg(kArg1));
-    OpRegCopy(cu, TargetReg(kArg1), TargetReg(kArg2));
-    ClobberCalleeSave(cu);
-    LIR* call_inst = OpReg(cu, kOpBlx, r_tgt);
-    MarkSafepointPC(cu, call_inst);
-    FreeTemp(cu, r_tgt);
+  if (cu_->instruction_set == kThumb2) {
+    int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pCheckCastFromCode));
+    OpRegReg(kOpCmp, TargetReg(kArg1), class_reg);
+    branch2 = OpCondBranch(kCondEq, NULL); /* If eq, trivial yes */
+    OpRegCopy(TargetReg(kArg0), TargetReg(kArg1));
+    OpRegCopy(TargetReg(kArg1), TargetReg(kArg2));
+    ClobberCalleeSave();
+    LIR* call_inst = OpReg(kOpBlx, r_tgt);
+    MarkSafepointPC(call_inst);
+    FreeTemp(r_tgt);
   } else {
-    branch2 = OpCmpBranch(cu, kCondEq, TargetReg(kArg1), class_reg, NULL);
-    CallRuntimeHelperRegReg(cu, ENTRYPOINT_OFFSET(pCheckCastFromCode), TargetReg(kArg1), TargetReg(kArg2), true);
+    branch2 = OpCmpBranch(kCondEq, TargetReg(kArg1), class_reg, NULL);
+    CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCheckCastFromCode), TargetReg(kArg1),
+                            TargetReg(kArg2), true);
   }
   /* branch target here */
-  LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+  LIR* target = NewLIR0(kPseudoTargetLabel);
   branch1->target = target;
   branch2->target = target;
 }
 
-void Codegen::GenLong3Addr(CompilationUnit* cu, OpKind first_op, OpKind second_op,
-                           RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
+void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
+                           RegLocation rl_src1, RegLocation rl_src2)
 {
   RegLocation rl_result;
-  if (cu->instruction_set == kThumb2) {
+  if (cu_->instruction_set == kThumb2) {
     /*
      * NOTE:  This is the one place in the code in which we might have
      * as many as six live temporary registers.  There are 5 in the normal
@@ -1071,22 +1061,22 @@
      * lr is used explicitly elsewhere in the code generator and cannot
      * normally be used as a general temp register.
      */
-    MarkTemp(cu, TargetReg(kLr));   // Add lr to the temp pool
-    FreeTemp(cu, TargetReg(kLr));   // and make it available
+    MarkTemp(TargetReg(kLr));   // Add lr to the temp pool
+    FreeTemp(TargetReg(kLr));   // and make it available
   }
-  rl_src1 = LoadValueWide(cu, rl_src1, kCoreReg);
-  rl_src2 = LoadValueWide(cu, rl_src2, kCoreReg);
-  rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+  rl_result = EvalLoc(rl_dest, kCoreReg, true);
   // The longs may overlap - use intermediate temp if so
  if ((rl_result.low_reg == rl_src1.high_reg) || (rl_result.low_reg == rl_src2.high_reg)) {
-    int t_reg = AllocTemp(cu);
-    OpRegRegReg(cu, first_op, t_reg, rl_src1.low_reg, rl_src2.low_reg);
-    OpRegRegReg(cu, second_op, rl_result.high_reg, rl_src1.high_reg, rl_src2.high_reg);
-    OpRegCopy(cu, rl_result.low_reg, t_reg);
-    FreeTemp(cu, t_reg);
+    int t_reg = AllocTemp();
+    OpRegRegReg(first_op, t_reg, rl_src1.low_reg, rl_src2.low_reg);
+    OpRegRegReg(second_op, rl_result.high_reg, rl_src1.high_reg, rl_src2.high_reg);
+    OpRegCopy(rl_result.low_reg, t_reg);
+    FreeTemp(t_reg);
   } else {
-    OpRegRegReg(cu, first_op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
-    OpRegRegReg(cu, second_op, rl_result.high_reg, rl_src1.high_reg,
+    OpRegRegReg(first_op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
+    OpRegRegReg(second_op, rl_result.high_reg, rl_src1.high_reg,
                 rl_src2.high_reg);
   }
   /*
@@ -1096,17 +1086,17 @@
    * free any temps from rl_src1 & rl_src2 that aren't still live in rl_result.
    * Remove when spill is functional.
    */
-  FreeRegLocTemps(cu, rl_result, rl_src1);
-  FreeRegLocTemps(cu, rl_result, rl_src2);
-  StoreValueWide(cu, rl_dest, rl_result);
-  if (cu->instruction_set == kThumb2) {
-    Clobber(cu, TargetReg(kLr));
-    UnmarkTemp(cu, TargetReg(kLr));  // Remove lr from the temp pool
+  FreeRegLocTemps(rl_result, rl_src1);
+  FreeRegLocTemps(rl_result, rl_src2);
+  StoreValueWide(rl_dest, rl_result);
+  if (cu_->instruction_set == kThumb2) {
+    Clobber(TargetReg(kLr));
+    UnmarkTemp(TargetReg(kLr));  // Remove lr from the temp pool
   }
 }
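
On the intermediate temp above: the low half of a wide result may share a register with a source's high half, so the low result is computed into t_reg first and committed only after the high half has consumed its inputs. A sketch for the add-long case; the real code pairs add/adc-style operations via OpRegRegReg.

#include <cstdint>

struct LongPair {
  uint32_t lo, hi;
};

LongPair AddLongSketch(LongPair a, LongPair b) {
  uint32_t t = a.lo + b.lo;                // first_op into t_reg
  uint32_t carry = (t < a.lo) ? 1u : 0u;   // hardware carry in the real code
  LongPair r;
  r.hi = a.hi + b.hi + carry;              // second_op, inputs still intact
  r.lo = t;                                // OpRegCopy(rl_result.low_reg, t)
  return r;
}
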
 
 
-void Codegen::GenShiftOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src1, RegLocation rl_shift)
 {
   int func_offset = -1; // Make gcc happy
@@ -1127,14 +1117,14 @@
     default:
       LOG(FATAL) << "Unexpected case";
   }
-  FlushAllRegs(cu);   /* Send everything to home location */
-  CallRuntimeHelperRegLocationRegLocation(cu, func_offset, rl_src1, rl_shift, false);
-  RegLocation rl_result = GetReturnWide(cu, false);
-  StoreValueWide(cu, rl_dest, rl_result);
+  FlushAllRegs();   /* Send everything to home location */
+  CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_shift, false);
+  RegLocation rl_result = GetReturnWide(false);
+  StoreValueWide(rl_dest, rl_result);
 }
 
 
-void Codegen::GenArithOpInt(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_src2)
 {
   OpKind op = kOpBkpt;
@@ -1209,58 +1199,58 @@
   }
   if (!is_div_rem) {
     if (unary) {
-      rl_src1 = LoadValue(cu, rl_src1, kCoreReg);
-      rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-      OpRegReg(cu, op, rl_result.low_reg, rl_src1.low_reg);
+      rl_src1 = LoadValue(rl_src1, kCoreReg);
+      rl_result = EvalLoc(rl_dest, kCoreReg, true);
+      OpRegReg(op, rl_result.low_reg, rl_src1.low_reg);
     } else {
       if (shift_op) {
         int t_reg = INVALID_REG;
-        if (cu->instruction_set == kX86) {
+        if (cu_->instruction_set == kX86) {
           // X86 doesn't require masking and must use ECX
           t_reg = TargetReg(kCount);  // rCX
-          LoadValueDirectFixed(cu, rl_src2, t_reg);
+          LoadValueDirectFixed(rl_src2, t_reg);
         } else {
-          rl_src2 = LoadValue(cu, rl_src2, kCoreReg);
-          t_reg = AllocTemp(cu);
-          OpRegRegImm(cu, kOpAnd, t_reg, rl_src2.low_reg, 31);
+          rl_src2 = LoadValue(rl_src2, kCoreReg);
+          t_reg = AllocTemp();
+          OpRegRegImm(kOpAnd, t_reg, rl_src2.low_reg, 31);
         }
-        rl_src1 = LoadValue(cu, rl_src1, kCoreReg);
-        rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-        OpRegRegReg(cu, op, rl_result.low_reg, rl_src1.low_reg, t_reg);
-        FreeTemp(cu, t_reg);
+        rl_src1 = LoadValue(rl_src1, kCoreReg);
+        rl_result = EvalLoc(rl_dest, kCoreReg, true);
+        OpRegRegReg(op, rl_result.low_reg, rl_src1.low_reg, t_reg);
+        FreeTemp(t_reg);
       } else {
-        rl_src1 = LoadValue(cu, rl_src1, kCoreReg);
-        rl_src2 = LoadValue(cu, rl_src2, kCoreReg);
-        rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-        OpRegRegReg(cu, op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
+        rl_src1 = LoadValue(rl_src1, kCoreReg);
+        rl_src2 = LoadValue(rl_src2, kCoreReg);
+        rl_result = EvalLoc(rl_dest, kCoreReg, true);
+        OpRegRegReg(op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
       }
     }
-    StoreValue(cu, rl_dest, rl_result);
+    StoreValue(rl_dest, rl_result);
   } else {
-    if (cu->instruction_set == kMips) {
-      rl_src1 = LoadValue(cu, rl_src1, kCoreReg);
-      rl_src2 = LoadValue(cu, rl_src2, kCoreReg);
+    if (cu_->instruction_set == kMips) {
+      rl_src1 = LoadValue(rl_src1, kCoreReg);
+      rl_src2 = LoadValue(rl_src2, kCoreReg);
       if (check_zero) {
-          GenImmedCheck(cu, kCondEq, rl_src2.low_reg, 0, kThrowDivZero);
+        GenImmedCheck(kCondEq, rl_src2.low_reg, 0, kThrowDivZero);
       }
-      rl_result = GenDivRem(cu, rl_dest, rl_src1.low_reg, rl_src2.low_reg, op == kOpDiv);
+      rl_result = GenDivRem(rl_dest, rl_src1.low_reg, rl_src2.low_reg, op == kOpDiv);
     } else {
       int func_offset = ENTRYPOINT_OFFSET(pIdivmod);
-      FlushAllRegs(cu);   /* Send everything to home location */
-      LoadValueDirectFixed(cu, rl_src2, TargetReg(kArg1));
-      int r_tgt = CallHelperSetup(cu, func_offset);
-      LoadValueDirectFixed(cu, rl_src1, TargetReg(kArg0));
+      FlushAllRegs();   /* Send everything to home location */
+      LoadValueDirectFixed(rl_src2, TargetReg(kArg1));
+      int r_tgt = CallHelperSetup(func_offset);
+      LoadValueDirectFixed(rl_src1, TargetReg(kArg0));
       if (check_zero) {
-        GenImmedCheck(cu, kCondEq, TargetReg(kArg1), 0, kThrowDivZero);
+        GenImmedCheck(kCondEq, TargetReg(kArg1), 0, kThrowDivZero);
       }
       // NOTE: callout here is not a safepoint
-      CallHelper(cu, r_tgt, func_offset, false /* not a safepoint */ );
+      CallHelper(r_tgt, func_offset, false /* not a safepoint */);
       if (op == kOpDiv)
-        rl_result = GetReturn(cu, false);
+        rl_result = GetReturn(false);
       else
-        rl_result = GetReturnAlt(cu);
+        rl_result = GetReturnAlt();
     }
-    StoreValue(cu, rl_dest, rl_result);
+    StoreValue(rl_dest, rl_result);
   }
 }
 
@@ -1298,16 +1288,15 @@
 
-// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
-// and store the result in 'rl_dest'.
+// Returns true if it added instructions to divide 'rl_src' by 'lit'
+// and store the result in 'rl_dest'.
-static bool HandleEasyDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
-                             RegLocation rl_src, RegLocation rl_dest, int lit)
+bool Mir2Lir::HandleEasyDivide(Instruction::Code dalvik_opcode,
+                               RegLocation rl_src, RegLocation rl_dest, int lit)
 {
-  if ((lit < 2) || ((cu->instruction_set != kThumb2) && !IsPowerOfTwo(lit))) {
+  if ((lit < 2) || ((cu_->instruction_set != kThumb2) && !IsPowerOfTwo(lit))) {
     return false;
   }
-  Codegen* cg = cu->cg.get();
   // No divide instruction for Arm, so check for more special cases
-  if ((cu->instruction_set == kThumb2) && !IsPowerOfTwo(lit)) {
-    return cg->SmallLiteralDivide(cu, dalvik_opcode, rl_src, rl_dest, lit);
+  if ((cu_->instruction_set == kThumb2) && !IsPowerOfTwo(lit)) {
+    return SmallLiteralDivide(dalvik_opcode, rl_src, rl_dest, lit);
   }
   int k = LowestSetBit(lit);
   if (k >= 30) {
@@ -1316,45 +1305,44 @@
   }
   bool div = (dalvik_opcode == Instruction::DIV_INT_LIT8 ||
       dalvik_opcode == Instruction::DIV_INT_LIT16);
-  rl_src = cg->LoadValue(cu, rl_src, kCoreReg);
-  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+  rl_src = LoadValue(rl_src, kCoreReg);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   if (div) {
-    int t_reg = AllocTemp(cu);
+    int t_reg = AllocTemp();
     if (lit == 2) {
       // Division by 2 is by far the most common division by constant.
-      cg->OpRegRegImm(cu, kOpLsr, t_reg, rl_src.low_reg, 32 - k);
-      cg->OpRegRegReg(cu, kOpAdd, t_reg, t_reg, rl_src.low_reg);
-      cg->OpRegRegImm(cu, kOpAsr, rl_result.low_reg, t_reg, k);
+      OpRegRegImm(kOpLsr, t_reg, rl_src.low_reg, 32 - k);
+      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.low_reg);
+      OpRegRegImm(kOpAsr, rl_result.low_reg, t_reg, k);
     } else {
-      cg->OpRegRegImm(cu, kOpAsr, t_reg, rl_src.low_reg, 31);
-      cg->OpRegRegImm(cu, kOpLsr, t_reg, t_reg, 32 - k);
-      cg->OpRegRegReg(cu, kOpAdd, t_reg, t_reg, rl_src.low_reg);
-      cg->OpRegRegImm(cu, kOpAsr, rl_result.low_reg, t_reg, k);
+      OpRegRegImm(kOpAsr, t_reg, rl_src.low_reg, 31);
+      OpRegRegImm(kOpLsr, t_reg, t_reg, 32 - k);
+      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.low_reg);
+      OpRegRegImm(kOpAsr, rl_result.low_reg, t_reg, k);
     }
   } else {
-    int t_reg1 = AllocTemp(cu);
-    int t_reg2 = AllocTemp(cu);
+    int t_reg1 = AllocTemp();
+    int t_reg2 = AllocTemp();
     if (lit == 2) {
-      cg->OpRegRegImm(cu, kOpLsr, t_reg1, rl_src.low_reg, 32 - k);
-      cg->OpRegRegReg(cu, kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
-      cg->OpRegRegImm(cu, kOpAnd, t_reg2, t_reg2, lit -1);
-      cg->OpRegRegReg(cu, kOpSub, rl_result.low_reg, t_reg2, t_reg1);
+      OpRegRegImm(kOpLsr, t_reg1, rl_src.low_reg, 32 - k);
+      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
+      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
+      OpRegRegReg(kOpSub, rl_result.low_reg, t_reg2, t_reg1);
     } else {
-      cg->OpRegRegImm(cu, kOpAsr, t_reg1, rl_src.low_reg, 31);
-      cg->OpRegRegImm(cu, kOpLsr, t_reg1, t_reg1, 32 - k);
-      cg->OpRegRegReg(cu, kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
-      cg->OpRegRegImm(cu, kOpAnd, t_reg2, t_reg2, lit - 1);
-      cg->OpRegRegReg(cu, kOpSub, rl_result.low_reg, t_reg2, t_reg1);
+      OpRegRegImm(kOpAsr, t_reg1, rl_src.low_reg, 31);
+      OpRegRegImm(kOpLsr, t_reg1, t_reg1, 32 - k);
+      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
+      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
+      OpRegRegReg(kOpSub, rl_result.low_reg, t_reg2, t_reg1);
     }
   }
-  cg->StoreValue(cu, rl_dest, rl_result);
+  StoreValue(rl_dest, rl_result);
   return true;
 }
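
For reference, the power-of-two branch above is the classic bias-then-shift
recipe for Dalvik's round-toward-zero division.  A minimal host-side sketch of
the kOpAsr/kOpLsr/kOpAdd/kOpAsr sequence (illustration only: DivByPowerOfTwo is
our name, and it assumes 32-bit ints with arithmetic right shift on signed
values):

#include <cassert>
#include <cstdint>

// Mirrors the general (lit != 2) case emitted above: bias a negative dividend
// by 2^k - 1 so the final arithmetic shift rounds toward zero.
int32_t DivByPowerOfTwo(int32_t src, int k) {
  int32_t t = src >> 31;                         // kOpAsr: all ones if negative
  t = static_cast<int32_t>(
      static_cast<uint32_t>(t) >> (32 - k));     // kOpLsr: 2^k - 1 or 0
  t += src;                                      // kOpAdd: apply the bias
  return t >> k;                                 // kOpAsr: the actual divide
}

int main() {
  assert(DivByPowerOfTwo(-7, 1) == -3);  // a plain asr would give -4
  assert(DivByPowerOfTwo(7, 2) == 1);
  return 0;
}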
 
-// Returns true if it added instructions to 'cu' to multiply 'rl_src' by 'lit'
-// and store the result in 'rl_dest'.
+// Returns true if it added instructions to multiply 'rl_src' by 'lit'
+// and store the result in 'rl_dest'.
-static bool HandleEasyMultiply(CompilationUnit* cu, RegLocation rl_src,
-                               RegLocation rl_dest, int lit)
+bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit)
 {
   // Can we simplify this multiplication?
   bool power_of_two = false;
@@ -1372,31 +1360,30 @@
   } else {
     return false;
   }
-  Codegen* cg = cu->cg.get();
-  rl_src = cg->LoadValue(cu, rl_src, kCoreReg);
-  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+  rl_src = LoadValue(rl_src, kCoreReg);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   if (power_of_two) {
     // Shift.
-    cg->OpRegRegImm(cu, kOpLsl, rl_result.low_reg, rl_src.low_reg, LowestSetBit(lit));
+    OpRegRegImm(kOpLsl, rl_result.low_reg, rl_src.low_reg, LowestSetBit(lit));
   } else if (pop_count_le2) {
     // Shift and add and shift.
     int first_bit = LowestSetBit(lit);
     int second_bit = LowestSetBit(lit ^ (1 << first_bit));
-    cg->GenMultiplyByTwoBitMultiplier(cu, rl_src, rl_result, lit, first_bit, second_bit);
+    GenMultiplyByTwoBitMultiplier(rl_src, rl_result, lit, first_bit, second_bit);
   } else {
     // Reverse subtract: (src << (shift + 1)) - src.
     DCHECK(power_of_two_minus_one);
     // TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1)
-    int t_reg = AllocTemp(cu);
-    cg->OpRegRegImm(cu, kOpLsl, t_reg, rl_src.low_reg, LowestSetBit(lit + 1));
-    cg->OpRegRegReg(cu, kOpSub, rl_result.low_reg, t_reg, rl_src.low_reg);
+    int t_reg = AllocTemp();
+    OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, LowestSetBit(lit + 1));
+    OpRegRegReg(kOpSub, rl_result.low_reg, t_reg, rl_src.low_reg);
   }
-  cg->StoreValue(cu, rl_dest, rl_result);
+  StoreValue(rl_dest, rl_result);
   return true;
 }
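
The three strength reductions selected above can be checked on the host.  A
sketch under stated assumptions (MulByLit is our name; __builtin_ctz and
__builtin_popcount stand in for the compiler's LowestSetBit and bit-count
tests; 32-bit operands):

#include <cassert>
#include <cstdint>

int32_t MulByLit(int32_t src, uint32_t lit) {
  if ((lit & (lit - 1)) == 0) {                      // power of two: one shift
    return src << __builtin_ctz(lit);
  }
  if (__builtin_popcount(lit) <= 2) {                // two set bits: shift-add
    int first = __builtin_ctz(lit);
    int second = __builtin_ctz(lit ^ (1u << first));
    return (src << second) + (src << first);
  }
  if ((lit & (lit + 1)) == 0) {                      // 2^n - 1: (src << n) - src
    return (src << __builtin_ctz(lit + 1)) - src;
  }
  return src * static_cast<int32_t>(lit);            // no easy form
}

int main() {
  assert(MulByLit(3, 8) == 24);   // power of two
  assert(MulByLit(3, 10) == 30);  // bits 1 and 3 set
  assert(MulByLit(3, 7) == 21);   // 2^3 - 1
  return 0;
}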
 
-void Codegen::GenArithOpIntLit(CompilationUnit* cu, Instruction::Code opcode,
-                               RegLocation rl_dest, RegLocation rl_src, int lit)
+void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src,
+                               int lit)
 {
   RegLocation rl_result;
   OpKind op = static_cast<OpKind>(0);    /* Make gcc happy */
@@ -1406,15 +1393,15 @@
   switch (opcode) {
     case Instruction::RSUB_INT_LIT8:
     case Instruction::RSUB_INT: {
-      rl_src = LoadValue(cu, rl_src, kCoreReg);
-      rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-      if (cu->instruction_set == kThumb2) {
-        OpRegRegImm(cu, kOpRsub, rl_result.low_reg, rl_src.low_reg, lit);
+      rl_src = LoadValue(rl_src, kCoreReg);
+      rl_result = EvalLoc(rl_dest, kCoreReg, true);
+      if (cu_->instruction_set == kThumb2) {
+        OpRegRegImm(kOpRsub, rl_result.low_reg, rl_src.low_reg, lit);
       } else {
-        OpRegReg(cu, kOpNeg, rl_result.low_reg, rl_src.low_reg);
-        OpRegImm(cu, kOpAdd, rl_result.low_reg, lit);
+        OpRegReg(kOpNeg, rl_result.low_reg, rl_src.low_reg);
+        OpRegImm(kOpAdd, rl_result.low_reg, lit);
       }
-      StoreValue(cu, rl_dest, rl_result);
+      StoreValue(rl_dest, rl_result);
       return;
     }
 
@@ -1432,7 +1419,7 @@
     case Instruction::MUL_INT_2ADDR:
     case Instruction::MUL_INT_LIT8:
     case Instruction::MUL_INT_LIT16: {
-      if (HandleEasyMultiply(cu, rl_src, rl_dest, lit)) {
+      if (HandleEasyMultiply(rl_src, rl_dest, lit)) {
         return;
       }
       op = kOpMul;
@@ -1487,10 +1474,10 @@
     case Instruction::REM_INT_LIT8:
     case Instruction::REM_INT_LIT16: {
       if (lit == 0) {
-        GenImmedCheck(cu, kCondAl, 0, 0, kThrowDivZero);
+        GenImmedCheck(kCondAl, 0, 0, kThrowDivZero);
         return;
       }
-      if (HandleEasyDivide(cu, opcode, rl_src, rl_dest, lit)) {
+      if (HandleEasyDivide(opcode, rl_src, rl_dest, lit)) {
         return;
       }
       if ((opcode == Instruction::DIV_INT_LIT8) ||
@@ -1501,38 +1488,38 @@
       } else {
         is_div = false;
       }
-      if (cu->instruction_set == kMips) {
-        rl_src = LoadValue(cu, rl_src, kCoreReg);
-        rl_result = GenDivRemLit(cu, rl_dest, rl_src.low_reg, lit, is_div);
+      if (cu_->instruction_set == kMips) {
+        rl_src = LoadValue(rl_src, kCoreReg);
+        rl_result = GenDivRemLit(rl_dest, rl_src.low_reg, lit, is_div);
       } else {
-        FlushAllRegs(cu);   /* Everything to home location */
-        LoadValueDirectFixed(cu, rl_src, TargetReg(kArg0));
-        Clobber(cu, TargetReg(kArg0));
+        FlushAllRegs();   /* Everything to home location */
+        LoadValueDirectFixed(rl_src, TargetReg(kArg0));
+        Clobber(TargetReg(kArg0));
         int func_offset = ENTRYPOINT_OFFSET(pIdivmod);
-        CallRuntimeHelperRegImm(cu, func_offset, TargetReg(kArg0), lit, false);
+        CallRuntimeHelperRegImm(func_offset, TargetReg(kArg0), lit, false);
         if (is_div)
-          rl_result = GetReturn(cu, false);
+          rl_result = GetReturn(false);
         else
-          rl_result = GetReturnAlt(cu);
+          rl_result = GetReturnAlt();
       }
-      StoreValue(cu, rl_dest, rl_result);
+      StoreValue(rl_dest, rl_result);
       return;
     }
     default:
       LOG(FATAL) << "Unexpected opcode " << opcode;
   }
-  rl_src = LoadValue(cu, rl_src, kCoreReg);
-  rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+  rl_src = LoadValue(rl_src, kCoreReg);
+  rl_result = EvalLoc(rl_dest, kCoreReg, true);
   // Avoid shifts by literal 0 - no support in Thumb.  Change to copy
   if (shift_op && (lit == 0)) {
-    OpRegCopy(cu, rl_result.low_reg, rl_src.low_reg);
+    OpRegCopy(rl_result.low_reg, rl_src.low_reg);
   } else {
-    OpRegRegImm(cu, op, rl_result.low_reg, rl_src.low_reg, lit);
+    OpRegRegImm(op, rl_result.low_reg, rl_src.low_reg, lit);
   }
-  StoreValue(cu, rl_dest, rl_result);
+  StoreValue(rl_dest, rl_result);
 }
 
-void Codegen::GenArithOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src1, RegLocation rl_src2)
 {
   RegLocation rl_result;
@@ -1545,25 +1532,25 @@
 
   switch (opcode) {
     case Instruction::NOT_LONG:
-      rl_src2 = LoadValueWide(cu, rl_src2, kCoreReg);
-      rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+      rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+      rl_result = EvalLoc(rl_dest, kCoreReg, true);
       // Check for destructive overlap
       if (rl_result.low_reg == rl_src2.high_reg) {
-        int t_reg = AllocTemp(cu);
-        OpRegCopy(cu, t_reg, rl_src2.high_reg);
-        OpRegReg(cu, kOpMvn, rl_result.low_reg, rl_src2.low_reg);
-        OpRegReg(cu, kOpMvn, rl_result.high_reg, t_reg);
-        FreeTemp(cu, t_reg);
+        int t_reg = AllocTemp();
+        OpRegCopy(t_reg, rl_src2.high_reg);
+        OpRegReg(kOpMvn, rl_result.low_reg, rl_src2.low_reg);
+        OpRegReg(kOpMvn, rl_result.high_reg, t_reg);
+        FreeTemp(t_reg);
       } else {
-        OpRegReg(cu, kOpMvn, rl_result.low_reg, rl_src2.low_reg);
-        OpRegReg(cu, kOpMvn, rl_result.high_reg, rl_src2.high_reg);
+        OpRegReg(kOpMvn, rl_result.low_reg, rl_src2.low_reg);
+        OpRegReg(kOpMvn, rl_result.high_reg, rl_src2.high_reg);
       }
-      StoreValueWide(cu, rl_dest, rl_result);
+      StoreValueWide(rl_dest, rl_result);
       return;
     case Instruction::ADD_LONG:
     case Instruction::ADD_LONG_2ADDR:
-      if (cu->instruction_set != kThumb2) {
-        GenAddLong(cu, rl_dest, rl_src1, rl_src2);
+      if (cu_->instruction_set != kThumb2) {
+        GenAddLong(rl_dest, rl_src1, rl_src2);
         return;
       }
       first_op = kOpAdd;
@@ -1571,8 +1558,8 @@
       break;
     case Instruction::SUB_LONG:
     case Instruction::SUB_LONG_2ADDR:
-      if (cu->instruction_set != kThumb2) {
-        GenSubLong(cu, rl_dest, rl_src1, rl_src2);
+      if (cu_->instruction_set != kThumb2) {
+        GenSubLong(rl_dest, rl_src1, rl_src2);
         return;
       }
       first_op = kOpSub;
@@ -1580,8 +1567,8 @@
       break;
     case Instruction::MUL_LONG:
     case Instruction::MUL_LONG_2ADDR:
-      if (cu->instruction_set == kThumb2) {
-        GenMulLong(cu, rl_dest, rl_src1, rl_src2);
+      if (cu_->instruction_set == kThumb2) {
+        GenMulLong(rl_dest, rl_src1, rl_src2);
         return;
       } else {
         call_out = true;
@@ -1602,20 +1589,20 @@
       check_zero = true;
       func_offset = ENTRYPOINT_OFFSET(pLdivmod);
       /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
-      ret_reg = (cu->instruction_set == kThumb2) ? TargetReg(kArg2) : TargetReg(kRet0);
+      ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2) : TargetReg(kRet0);
       break;
     case Instruction::AND_LONG_2ADDR:
     case Instruction::AND_LONG:
-      if (cu->instruction_set == kX86) {
-        return GenAndLong(cu, rl_dest, rl_src1, rl_src2);
+      if (cu_->instruction_set == kX86) {
+        return GenAndLong(rl_dest, rl_src1, rl_src2);
       }
       first_op = kOpAnd;
       second_op = kOpAnd;
       break;
     case Instruction::OR_LONG:
     case Instruction::OR_LONG_2ADDR:
-      if (cu->instruction_set == kX86) {
-        GenOrLong(cu, rl_dest, rl_src1, rl_src2);
+      if (cu_->instruction_set == kX86) {
+        GenOrLong(rl_dest, rl_src1, rl_src2);
         return;
       }
       first_op = kOpOr;
@@ -1623,99 +1610,98 @@
       break;
     case Instruction::XOR_LONG:
     case Instruction::XOR_LONG_2ADDR:
-      if (cu->instruction_set == kX86) {
-        GenXorLong(cu, rl_dest, rl_src1, rl_src2);
+      if (cu_->instruction_set == kX86) {
+        GenXorLong(rl_dest, rl_src1, rl_src2);
         return;
       }
       first_op = kOpXor;
       second_op = kOpXor;
       break;
     case Instruction::NEG_LONG: {
-      GenNegLong(cu, rl_dest, rl_src2);
+      GenNegLong(rl_dest, rl_src2);
       return;
     }
     default:
       LOG(FATAL) << "Invalid long arith op";
   }
   if (!call_out) {
-    GenLong3Addr(cu, first_op, second_op, rl_dest, rl_src1, rl_src2);
+    GenLong3Addr(first_op, second_op, rl_dest, rl_src1, rl_src2);
   } else {
-    FlushAllRegs(cu);   /* Send everything to home location */
+    FlushAllRegs();   /* Send everything to home location */
     if (check_zero) {
-      LoadValueDirectWideFixed(cu, rl_src2, TargetReg(kArg2), TargetReg(kArg3));
-      int r_tgt = CallHelperSetup(cu, func_offset);
-      GenDivZeroCheck(cu, TargetReg(kArg2), TargetReg(kArg3));
-      LoadValueDirectWideFixed(cu, rl_src1, TargetReg(kArg0), TargetReg(kArg1));
+      LoadValueDirectWideFixed(rl_src2, TargetReg(kArg2), TargetReg(kArg3));
+      int r_tgt = CallHelperSetup(func_offset);
+      GenDivZeroCheck(TargetReg(kArg2), TargetReg(kArg3));
+      LoadValueDirectWideFixed(rl_src1, TargetReg(kArg0), TargetReg(kArg1));
       // NOTE: callout here is not a safepoint
-      CallHelper(cu, r_tgt, func_offset, false /* not safepoint */);
+      CallHelper(r_tgt, func_offset, false /* not safepoint */);
     } else {
-      CallRuntimeHelperRegLocationRegLocation(cu, func_offset,
-                          rl_src1, rl_src2, false);
+      CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
     }
-    // Adjust return regs in to handle case of rem returning kArg2/kArg3
+    // Adjust return regs to handle the case of rem returning kArg2/kArg3
     if (ret_reg == TargetReg(kRet0))
-      rl_result = GetReturnWide(cu, false);
+      rl_result = GetReturnWide(false);
     else
-      rl_result = GetReturnWideAlt(cu);
-    StoreValueWide(cu, rl_dest, rl_result);
+      rl_result = GetReturnWideAlt();
+    StoreValueWide(rl_dest, rl_result);
   }
 }
 
-void Codegen::GenConversionCall(CompilationUnit* cu, int func_offset,
+void Mir2Lir::GenConversionCall(int func_offset,
                                 RegLocation rl_dest, RegLocation rl_src)
 {
   /*
    * Don't optimize the register usage since it calls out to support
    * functions
    */
-  FlushAllRegs(cu);   /* Send everything to home location */
+  FlushAllRegs();   /* Send everything to home location */
   if (rl_src.wide) {
-    LoadValueDirectWideFixed(cu, rl_src, rl_src.fp ? TargetReg(kFArg0) : TargetReg(kArg0),
+    LoadValueDirectWideFixed(rl_src, rl_src.fp ? TargetReg(kFArg0) : TargetReg(kArg0),
                              rl_src.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
   } else {
-    LoadValueDirectFixed(cu, rl_src, rl_src.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
+    LoadValueDirectFixed(rl_src, rl_src.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
   }
-  CallRuntimeHelperRegLocation(cu, func_offset, rl_src, false);
+  CallRuntimeHelperRegLocation(func_offset, rl_src, false);
   if (rl_dest.wide) {
     RegLocation rl_result;
-    rl_result = GetReturnWide(cu, rl_dest.fp);
-    StoreValueWide(cu, rl_dest, rl_result);
+    rl_result = GetReturnWide(rl_dest.fp);
+    StoreValueWide(rl_dest, rl_result);
   } else {
     RegLocation rl_result;
-    rl_result = GetReturn(cu, rl_dest.fp);
-    StoreValue(cu, rl_dest, rl_result);
+    rl_result = GetReturn(rl_dest.fp);
+    StoreValue(rl_dest, rl_result);
   }
 }
 
 /* Check if we need to check for pending suspend request */
-void Codegen::GenSuspendTest(CompilationUnit* cu, int opt_flags)
+void Mir2Lir::GenSuspendTest(int opt_flags)
 {
   if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
     return;
   }
-  FlushAllRegs(cu);
-  LIR* branch = OpTestSuspend(cu, NULL);
-  LIR* ret_lab = NewLIR0(cu, kPseudoTargetLabel);
-  LIR* target = RawLIR(cu, cu->current_dalvik_offset, kPseudoSuspendTarget,
-                       reinterpret_cast<uintptr_t>(ret_lab), cu->current_dalvik_offset);
+  FlushAllRegs();
+  LIR* branch = OpTestSuspend(NULL);
+  LIR* ret_lab = NewLIR0(kPseudoTargetLabel);
+  LIR* target = RawLIR(current_dalvik_offset_, kPseudoSuspendTarget,
+                       reinterpret_cast<uintptr_t>(ret_lab), current_dalvik_offset_);
   branch->target = target;
-  InsertGrowableList(cu, &cu->suspend_launchpads, reinterpret_cast<uintptr_t>(target));
+  InsertGrowableList(cu_, &suspend_launchpads_, reinterpret_cast<uintptr_t>(target));
 }
 
 /* Check if we need to check for pending suspend request */
-void Codegen::GenSuspendTestAndBranch(CompilationUnit* cu, int opt_flags, LIR* target)
+void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target)
 {
   if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
-    OpUnconditionalBranch(cu, target);
+    OpUnconditionalBranch(target);
     return;
   }
-  OpTestSuspend(cu, target);
+  OpTestSuspend(target);
   LIR* launch_pad =
-      RawLIR(cu, cu->current_dalvik_offset, kPseudoSuspendTarget,
-             reinterpret_cast<uintptr_t>(target), cu->current_dalvik_offset);
-  FlushAllRegs(cu);
-  OpUnconditionalBranch(cu, launch_pad);
-  InsertGrowableList(cu, &cu->suspend_launchpads, reinterpret_cast<uintptr_t>(launch_pad));
+      RawLIR(current_dalvik_offset_, kPseudoSuspendTarget,
+             reinterpret_cast<uintptr_t>(target), current_dalvik_offset_);
+  FlushAllRegs();
+  OpUnconditionalBranch(launch_pad);
+  InsertGrowableList(cu_, &suspend_launchpads_, reinterpret_cast<uintptr_t>(launch_pad));
 }
 
 }  // namespace art
diff --git a/src/compiler/dex/quick/gen_invoke.cc b/src/compiler/dex/quick/gen_invoke.cc
index c654143..3e946f8 100644
--- a/src/compiler/dex/quick/gen_invoke.cc
+++ b/src/compiler/dex/quick/gen_invoke.cc
@@ -14,11 +14,9 @@
  * limitations under the License.
  */
 
-#include "codegen_util.h"
 #include "compiler/dex/compiler_ir.h"
 #include "invoke_type.h"
 #include "oat/runtime/oat_support_entrypoints.h"
-#include "ralloc_util.h"
 #include "x86/codegen_x86.h"
 
 namespace art {
@@ -35,198 +33,192 @@
  * has a memory call operation, part 1 is a NOP for x86.  For other targets,
  * load arguments between the two parts.
  */
-int Codegen::CallHelperSetup(CompilationUnit* cu, int helper_offset)
+int Mir2Lir::CallHelperSetup(int helper_offset)
 {
-  return (cu->instruction_set == kX86) ? 0 : LoadHelper(cu, helper_offset);
+  return (cu_->instruction_set == kX86) ? 0 : LoadHelper(helper_offset);
 }
 
 /* NOTE: if r_tgt is a temp, it will be freed following use */
-LIR* Codegen::CallHelper(CompilationUnit* cu, int r_tgt, int helper_offset, bool safepoint_pc)
+LIR* Mir2Lir::CallHelper(int r_tgt, int helper_offset, bool safepoint_pc)
 {
   LIR* call_inst;
-  if (cu->instruction_set == kX86) {
-    call_inst = OpThreadMem(cu, kOpBlx, helper_offset);
+  if (cu_->instruction_set == kX86) {
+    call_inst = OpThreadMem(kOpBlx, helper_offset);
   } else {
-    call_inst = OpReg(cu, kOpBlx, r_tgt);
-    FreeTemp(cu, r_tgt);
+    call_inst = OpReg(kOpBlx, r_tgt);
+    FreeTemp(r_tgt);
   }
   if (safepoint_pc) {
-    MarkSafepointPC(cu, call_inst);
+    MarkSafepointPC(call_inst);
   }
   return call_inst;
 }
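
A toy model of the split-call protocol described above (illustration only:
SetupModel and CallModel are our names, and it prints pseudo-assembly instead
of emitting LIR) shows why part 1 degenerates to a no-op on x86:

#include <cstdio>

enum InstructionSet { kX86, kThumb2 };

// Part 1: register machines load the helper address into a temp so argument
// setup can run between the two parts; on x86 this is a no-op returning 0.
int SetupModel(InstructionSet isa, int helper_offset) {
  return (isa == kX86) ? 0 : helper_offset;  // stand-in for LoadHelper()
}

// Part 2: x86 branches through Thread-relative memory; others use the temp.
void CallModel(InstructionSet isa, int r_tgt, int helper_offset) {
  if (isa == kX86) {
    std::printf("call [rSELF + %d]\n", helper_offset);  // OpThreadMem(kOpBlx, ...)
  } else {
    std::printf("blx r%d\n", r_tgt);                    // OpReg(kOpBlx, r_tgt)
  }
}

int main() {
  const InstructionSet isas[] = {kX86, kThumb2};
  for (InstructionSet isa : isas) {
    int r_tgt = SetupModel(isa, 84);  // 84: arbitrary example offset
    // ...argument marshaling (LoadConstant/OpRegCopy/...) goes here...
    CallModel(isa, r_tgt, 84);
  }
  return 0;
}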
 
-void Codegen::CallRuntimeHelperImm(CompilationUnit* cu, int helper_offset, int arg0,
-                                   bool safepoint_pc) {
-  int r_tgt = CallHelperSetup(cu, helper_offset);
-  LoadConstant(cu, TargetReg(kArg0), arg0);
-  ClobberCalleeSave(cu);
-  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+void Mir2Lir::CallRuntimeHelperImm(int helper_offset, int arg0, bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(helper_offset);
+  LoadConstant(TargetReg(kArg0), arg0);
+  ClobberCalleeSave();
+  CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Codegen::CallRuntimeHelperReg(CompilationUnit* cu, int helper_offset, int arg0,
-                                   bool safepoint_pc) {
-  int r_tgt = CallHelperSetup(cu, helper_offset);
-  OpRegCopy(cu, TargetReg(kArg0), arg0);
-  ClobberCalleeSave(cu);
-  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+void Mir2Lir::CallRuntimeHelperReg(int helper_offset, int arg0, bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(helper_offset);
+  OpRegCopy(TargetReg(kArg0), arg0);
+  ClobberCalleeSave();
+  CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Codegen::CallRuntimeHelperRegLocation(CompilationUnit* cu, int helper_offset, RegLocation arg0,
-                                          bool safepoint_pc) {
-  int r_tgt = CallHelperSetup(cu, helper_offset);
+void Mir2Lir::CallRuntimeHelperRegLocation(int helper_offset, RegLocation arg0, bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(helper_offset);
   if (arg0.wide == 0) {
-    LoadValueDirectFixed(cu, arg0, TargetReg(kArg0));
+    LoadValueDirectFixed(arg0, TargetReg(kArg0));
   } else {
-    LoadValueDirectWideFixed(cu, arg0, TargetReg(kArg0), TargetReg(kArg1));
+    LoadValueDirectWideFixed(arg0, TargetReg(kArg0), TargetReg(kArg1));
   }
-  ClobberCalleeSave(cu);
-  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+  ClobberCalleeSave();
+  CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Codegen::CallRuntimeHelperImmImm(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
+void Mir2Lir::CallRuntimeHelperImmImm(int helper_offset, int arg0, int arg1,
                                       bool safepoint_pc) {
-  int r_tgt = CallHelperSetup(cu, helper_offset);
-  LoadConstant(cu, TargetReg(kArg0), arg0);
-  LoadConstant(cu, TargetReg(kArg1), arg1);
-  ClobberCalleeSave(cu);
-  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+  int r_tgt = CallHelperSetup(helper_offset);
+  LoadConstant(TargetReg(kArg0), arg0);
+  LoadConstant(TargetReg(kArg1), arg1);
+  ClobberCalleeSave();
+  CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Codegen::CallRuntimeHelperImmRegLocation(CompilationUnit* cu, int helper_offset, int arg0,
+void Mir2Lir::CallRuntimeHelperImmRegLocation(int helper_offset, int arg0,
                                               RegLocation arg1, bool safepoint_pc) {
-  int r_tgt = CallHelperSetup(cu, helper_offset);
+  int r_tgt = CallHelperSetup(helper_offset);
   if (arg1.wide == 0) {
-    LoadValueDirectFixed(cu, arg1, TargetReg(kArg1));
+    LoadValueDirectFixed(arg1, TargetReg(kArg1));
   } else {
-    LoadValueDirectWideFixed(cu, arg1, TargetReg(kArg1), TargetReg(kArg2));
+    LoadValueDirectWideFixed(arg1, TargetReg(kArg1), TargetReg(kArg2));
   }
-  LoadConstant(cu, TargetReg(kArg0), arg0);
-  ClobberCalleeSave(cu);
-  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+  LoadConstant(TargetReg(kArg0), arg0);
+  ClobberCalleeSave();
+  CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Codegen::CallRuntimeHelperRegLocationImm(CompilationUnit* cu, int helper_offset,
-                                              RegLocation arg0, int arg1, bool safepoint_pc) {
-  int r_tgt = CallHelperSetup(cu, helper_offset);
-  LoadValueDirectFixed(cu, arg0, TargetReg(kArg0));
-  LoadConstant(cu, TargetReg(kArg1), arg1);
-  ClobberCalleeSave(cu);
-  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+void Mir2Lir::CallRuntimeHelperRegLocationImm(int helper_offset, RegLocation arg0, int arg1,
+                                              bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(helper_offset);
+  LoadValueDirectFixed(arg0, TargetReg(kArg0));
+  LoadConstant(TargetReg(kArg1), arg1);
+  ClobberCalleeSave();
+  CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Codegen::CallRuntimeHelperImmReg(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
+void Mir2Lir::CallRuntimeHelperImmReg(int helper_offset, int arg0, int arg1,
                                       bool safepoint_pc) {
-  int r_tgt = CallHelperSetup(cu, helper_offset);
-  OpRegCopy(cu, TargetReg(kArg1), arg1);
-  LoadConstant(cu, TargetReg(kArg0), arg0);
-  ClobberCalleeSave(cu);
-  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+  int r_tgt = CallHelperSetup(helper_offset);
+  OpRegCopy(TargetReg(kArg1), arg1);
+  LoadConstant(TargetReg(kArg0), arg0);
+  ClobberCalleeSave();
+  CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Codegen::CallRuntimeHelperRegImm(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
+void Mir2Lir::CallRuntimeHelperRegImm(int helper_offset, int arg0, int arg1,
                              bool safepoint_pc) {
-  int r_tgt = CallHelperSetup(cu, helper_offset);
-  OpRegCopy(cu, TargetReg(kArg0), arg0);
-  LoadConstant(cu, TargetReg(kArg1), arg1);
-  ClobberCalleeSave(cu);
-  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+  int r_tgt = CallHelperSetup(helper_offset);
+  OpRegCopy(TargetReg(kArg0), arg0);
+  LoadConstant(TargetReg(kArg1), arg1);
+  ClobberCalleeSave();
+  CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Codegen::CallRuntimeHelperImmMethod(CompilationUnit* cu, int helper_offset, int arg0,
-                                         bool safepoint_pc) {
-  int r_tgt = CallHelperSetup(cu, helper_offset);
-  LoadCurrMethodDirect(cu, TargetReg(kArg1));
-  LoadConstant(cu, TargetReg(kArg0), arg0);
-  ClobberCalleeSave(cu);
-  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+void Mir2Lir::CallRuntimeHelperImmMethod(int helper_offset, int arg0, bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(helper_offset);
+  LoadCurrMethodDirect(TargetReg(kArg1));
+  LoadConstant(TargetReg(kArg0), arg0);
+  ClobberCalleeSave();
+  CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Codegen::CallRuntimeHelperRegLocationRegLocation(CompilationUnit* cu, int helper_offset,
-                                                      RegLocation arg0, RegLocation arg1,
-                                                      bool safepoint_pc) {
-  int r_tgt = CallHelperSetup(cu, helper_offset);
+void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(int helper_offset, RegLocation arg0,
+                                                      RegLocation arg1, bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(helper_offset);
   if (arg0.wide == 0) {
-    LoadValueDirectFixed(cu, arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
+    LoadValueDirectFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
     if (arg1.wide == 0) {
-      if (cu->instruction_set == kMips) {
-        LoadValueDirectFixed(cu, arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1));
+      if (cu_->instruction_set == kMips) {
+        LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1));
       } else {
-        LoadValueDirectFixed(cu, arg1, TargetReg(kArg1));
+        LoadValueDirectFixed(arg1, TargetReg(kArg1));
       }
     } else {
-      if (cu->instruction_set == kMips) {
-        LoadValueDirectWideFixed(cu, arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1), arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg2));
+      if (cu_->instruction_set == kMips) {
+        LoadValueDirectWideFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1),
+                                 arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg2));
       } else {
-        LoadValueDirectWideFixed(cu, arg1, TargetReg(kArg1), TargetReg(kArg2));
+        LoadValueDirectWideFixed(arg1, TargetReg(kArg1), TargetReg(kArg2));
       }
     }
   } else {
-    LoadValueDirectWideFixed(cu, arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0), arg0.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
+    LoadValueDirectWideFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0),
+                             arg0.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
     if (arg1.wide == 0) {
-      LoadValueDirectFixed(cu, arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2));
+      LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2));
     } else {
-      LoadValueDirectWideFixed(cu, arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2), arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg3));
+      LoadValueDirectWideFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2),
+                               arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg3));
     }
   }
-  ClobberCalleeSave(cu);
-  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+  ClobberCalleeSave();
+  CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Codegen::CallRuntimeHelperRegReg(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
-                                      bool safepoint_pc) {
-  int r_tgt = CallHelperSetup(cu, helper_offset);
+void Mir2Lir::CallRuntimeHelperRegReg(int helper_offset, int arg0, int arg1, bool safepoint_pc) {
+  int r_tgt = CallHelperSetup(helper_offset);
   DCHECK_NE(TargetReg(kArg0), arg1);  // check copy into arg0 won't clobber arg1
-  OpRegCopy(cu, TargetReg(kArg0), arg0);
-  OpRegCopy(cu, TargetReg(kArg1), arg1);
-  ClobberCalleeSave(cu);
-  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+  OpRegCopy(TargetReg(kArg0), arg0);
+  OpRegCopy(TargetReg(kArg1), arg1);
+  ClobberCalleeSave();
+  CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Codegen::CallRuntimeHelperRegRegImm(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
+void Mir2Lir::CallRuntimeHelperRegRegImm(int helper_offset, int arg0, int arg1,
                                          int arg2, bool safepoint_pc) {
-  int r_tgt = CallHelperSetup(cu, helper_offset);
+  int r_tgt = CallHelperSetup(helper_offset);
   DCHECK_NE(TargetReg(kArg0), arg1);  // check copy into arg0 won't clobber arg1
-  OpRegCopy(cu, TargetReg(kArg0), arg0);
-  OpRegCopy(cu, TargetReg(kArg1), arg1);
-  LoadConstant(cu, TargetReg(kArg2), arg2);
-  ClobberCalleeSave(cu);
-  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+  OpRegCopy(TargetReg(kArg0), arg0);
+  OpRegCopy(TargetReg(kArg1), arg1);
+  LoadConstant(TargetReg(kArg2), arg2);
+  ClobberCalleeSave();
+  CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Codegen::CallRuntimeHelperImmMethodRegLocation(CompilationUnit* cu, int helper_offset,
+void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(int helper_offset,
                                                     int arg0, RegLocation arg2, bool safepoint_pc) {
-  int r_tgt = CallHelperSetup(cu, helper_offset);
-  LoadValueDirectFixed(cu, arg2, TargetReg(kArg2));
-  LoadCurrMethodDirect(cu, TargetReg(kArg1));
-  LoadConstant(cu, TargetReg(kArg0), arg0);
-  ClobberCalleeSave(cu);
-  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+  int r_tgt = CallHelperSetup(helper_offset);
+  LoadValueDirectFixed(arg2, TargetReg(kArg2));
+  LoadCurrMethodDirect(TargetReg(kArg1));
+  LoadConstant(TargetReg(kArg0), arg0);
+  ClobberCalleeSave();
+  CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Codegen::CallRuntimeHelperImmMethodImm(CompilationUnit* cu, int helper_offset, int arg0,
+void Mir2Lir::CallRuntimeHelperImmMethodImm(int helper_offset, int arg0,
                                             int arg2, bool safepoint_pc) {
-  int r_tgt = CallHelperSetup(cu, helper_offset);
-  LoadCurrMethodDirect(cu, TargetReg(kArg1));
-  LoadConstant(cu, TargetReg(kArg2), arg2);
-  LoadConstant(cu, TargetReg(kArg0), arg0);
-  ClobberCalleeSave(cu);
-  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+  int r_tgt = CallHelperSetup(helper_offset);
+  LoadCurrMethodDirect(TargetReg(kArg1));
+  LoadConstant(TargetReg(kArg2), arg2);
+  LoadConstant(TargetReg(kArg0), arg0);
+  ClobberCalleeSave();
+  CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Codegen::CallRuntimeHelperImmRegLocationRegLocation(CompilationUnit* cu, int helper_offset,
+void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(int helper_offset,
                                                          int arg0, RegLocation arg1,
                                                          RegLocation arg2, bool safepoint_pc) {
-  int r_tgt = CallHelperSetup(cu, helper_offset);
-  LoadValueDirectFixed(cu, arg1, TargetReg(kArg1));
+  int r_tgt = CallHelperSetup(helper_offset);
+  LoadValueDirectFixed(arg1, TargetReg(kArg1));
   if (arg2.wide == 0) {
-    LoadValueDirectFixed(cu, arg2, TargetReg(kArg2));
+    LoadValueDirectFixed(arg2, TargetReg(kArg2));
   } else {
-    LoadValueDirectWideFixed(cu, arg2, TargetReg(kArg2), TargetReg(kArg3));
+    LoadValueDirectWideFixed(arg2, TargetReg(kArg2), TargetReg(kArg3));
   }
-  LoadConstant(cu, TargetReg(kArg0), arg0);
-  ClobberCalleeSave(cu);
-  CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+  LoadConstant(TargetReg(kArg0), arg0);
+  ClobberCalleeSave();
+  CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
 /*
@@ -237,7 +229,7 @@
  * ArgLocs is an array of location records describing the incoming arguments
  * with one location record per word of argument.
  */
-void Codegen::FlushIns(CompilationUnit* cu, RegLocation* ArgLocs, RegLocation rl_method)
+void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method)
 {
   /*
    * Dummy up a RegLocation for the incoming Method*
@@ -248,18 +240,18 @@
   rl_src.location = kLocPhysReg;
   rl_src.low_reg = TargetReg(kArg0);
   rl_src.home = false;
-  MarkLive(cu, rl_src.low_reg, rl_src.s_reg_low);
-  StoreValue(cu, rl_method, rl_src);
+  MarkLive(rl_src.low_reg, rl_src.s_reg_low);
+  StoreValue(rl_method, rl_src);
   // If Method* has been promoted, explicitly flush
   if (rl_method.location == kLocPhysReg) {
-    StoreWordDisp(cu, TargetReg(kSp), 0, TargetReg(kArg0));
+    StoreWordDisp(TargetReg(kSp), 0, TargetReg(kArg0));
   }
 
-  if (cu->num_ins == 0)
+  if (cu_->num_ins == 0)
     return;
   const int num_arg_regs = 3;
   static SpecialTargetRegister arg_regs[] = {kArg1, kArg2, kArg3};
-  int start_vreg = cu->num_dalvik_registers - cu->num_ins;
+  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
   /*
    * Copy incoming arguments to their proper home locations.
    * NOTE: an older version of dx had an issue in which
@@ -272,17 +264,17 @@
    * end up half-promoted.  In those cases, we must flush the promoted
    * half to memory as well.
    */
-  for (int i = 0; i < cu->num_ins; i++) {
-    PromotionMap* v_map = &cu->promotion_map[start_vreg + i];
+  for (int i = 0; i < cu_->num_ins; i++) {
+    PromotionMap* v_map = &promotion_map_[start_vreg + i];
     if (i < num_arg_regs) {
       // If arriving in register
       bool need_flush = true;
       RegLocation* t_loc = &ArgLocs[i];
       if ((v_map->core_location == kLocPhysReg) && !t_loc->fp) {
-        OpRegCopy(cu, v_map->core_reg, TargetReg(arg_regs[i]));
+        OpRegCopy(v_map->core_reg, TargetReg(arg_regs[i]));
         need_flush = false;
       } else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) {
-        OpRegCopy(cu, v_map->FpReg, TargetReg(arg_regs[i]));
+        OpRegCopy(v_map->FpReg, TargetReg(arg_regs[i]));
         need_flush = false;
       } else {
         need_flush = true;
@@ -295,17 +287,17 @@
             (p_map->fp_location != v_map->fp_location);
       }
       if (need_flush) {
-        StoreBaseDisp(cu, TargetReg(kSp), SRegOffset(cu, start_vreg + i),
+        StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
                       TargetReg(arg_regs[i]), kWord);
       }
     } else {
       // If arriving in frame & promoted
       if (v_map->core_location == kLocPhysReg) {
-        LoadWordDisp(cu, TargetReg(kSp), SRegOffset(cu, start_vreg + i),
+        LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
                      v_map->core_reg);
       }
       if (v_map->fp_location == kLocPhysReg) {
-        LoadWordDisp(cu, TargetReg(kSp), SRegOffset(cu, start_vreg + i),
+        LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
                      v_map->FpReg);
       }
     }
@@ -321,7 +313,7 @@
                           uintptr_t direct_code, uintptr_t direct_method,
                           InvokeType type)
 {
-  Codegen* cg = cu->cg.get();
+  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
   if (cu->instruction_set != kThumb2) {
     // Disable sharpening
     direct_code = 0;
@@ -331,27 +323,27 @@
     switch (state) {
     case 0:  // Get the current Method* [sets kArg0]
       if (direct_code != static_cast<unsigned int>(-1)) {
-        cg->LoadConstant(cu, cg->TargetReg(kInvokeTgt), direct_code);
+        cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
       } else {
-        LIR* data_target = ScanLiteralPool(cu->code_literal_list, dex_idx, 0);
+        LIR* data_target = cg->ScanLiteralPool(cg->code_literal_list_, dex_idx, 0);
         if (data_target == NULL) {
-          data_target = AddWordData(cu, &cu->code_literal_list, dex_idx);
+          data_target = cg->AddWordData(&cg->code_literal_list_, dex_idx);
           data_target->operands[1] = type;
         }
-        LIR* load_pc_rel = cg->OpPcRelLoad(cu, cg->TargetReg(kInvokeTgt), data_target);
-        AppendLIR(cu, load_pc_rel);
+        LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kInvokeTgt), data_target);
+        cg->AppendLIR(load_pc_rel);
         DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
       }
       if (direct_method != static_cast<unsigned int>(-1)) {
-        cg->LoadConstant(cu, cg->TargetReg(kArg0), direct_method);
+        cg->LoadConstant(cg->TargetReg(kArg0), direct_method);
       } else {
-        LIR* data_target = ScanLiteralPool(cu->method_literal_list, dex_idx, 0);
+        LIR* data_target = cg->ScanLiteralPool(cg->method_literal_list_, dex_idx, 0);
         if (data_target == NULL) {
-          data_target = AddWordData(cu, &cu->method_literal_list, dex_idx);
+          data_target = cg->AddWordData(&cg->method_literal_list_, dex_idx);
           data_target->operands[1] = type;
         }
-        LIR* load_pc_rel = cg->OpPcRelLoad(cu, cg->TargetReg(kArg0), data_target);
-        AppendLIR(cu, load_pc_rel);
+        LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kArg0), data_target);
+        cg->AppendLIR(load_pc_rel);
         DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
       }
       break;
@@ -362,36 +354,36 @@
     switch (state) {
     case 0:  // Get the current Method* [sets kArg0]
       // TUNING: we can save a reg copy if Method* has been promoted.
-      cg->LoadCurrMethodDirect(cu, cg->TargetReg(kArg0));
+      cg->LoadCurrMethodDirect(cg->TargetReg(kArg0));
       break;
     case 1:  // Get method->dex_cache_resolved_methods_
-      cg->LoadWordDisp(cu, cg->TargetReg(kArg0),
+      cg->LoadWordDisp(cg->TargetReg(kArg0),
         mirror::AbstractMethod::DexCacheResolvedMethodsOffset().Int32Value(), cg->TargetReg(kArg0));
       // Set up direct code if known.
       if (direct_code != 0) {
         if (direct_code != static_cast<unsigned int>(-1)) {
-          cg->LoadConstant(cu, cg->TargetReg(kInvokeTgt), direct_code);
+          cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
         } else {
-          LIR* data_target = ScanLiteralPool(cu->code_literal_list, dex_idx, 0);
+          LIR* data_target = cg->ScanLiteralPool(cg->code_literal_list_, dex_idx, 0);
           if (data_target == NULL) {
-            data_target = AddWordData(cu, &cu->code_literal_list, dex_idx);
+            data_target = cg->AddWordData(&cg->code_literal_list_, dex_idx);
             data_target->operands[1] = type;
           }
-          LIR* load_pc_rel = cg->OpPcRelLoad(cu, cg->TargetReg(kInvokeTgt), data_target);
-          AppendLIR(cu, load_pc_rel);
+          LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kInvokeTgt), data_target);
+          cg->AppendLIR(load_pc_rel);
           DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
         }
       }
       break;
     case 2:  // Grab target method*
-      cg->LoadWordDisp(cu, cg->TargetReg(kArg0),
+      cg->LoadWordDisp(cg->TargetReg(kArg0),
                        mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() + dex_idx * 4,
                        cg-> TargetReg(kArg0));
       break;
     case 3:  // Grab the code from the method*
       if (cu->instruction_set != kX86) {
         if (direct_code == 0) {
-          cg->LoadWordDisp(cu, cg->TargetReg(kArg0),
+          cg->LoadWordDisp(cg->TargetReg(kArg0),
                            mirror::AbstractMethod::GetCodeOffset().Int32Value(),
                            cg->TargetReg(kInvokeTgt));
         }
@@ -416,7 +408,7 @@
                          int state, uint32_t dex_idx, uint32_t method_idx,
                          uintptr_t unused, uintptr_t unused2, InvokeType unused3)
 {
-  Codegen* cg = cu->cg.get();
+  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
   /*
    * This is the fast path in which the target virtual method is
    * fully resolved at compile time.
@@ -424,27 +416,27 @@
   switch (state) {
     case 0: {  // Get "this" [set kArg1]
       RegLocation  rl_arg = info->args[0];
-      cg->LoadValueDirectFixed(cu, rl_arg, cg->TargetReg(kArg1));
+      cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1));
       break;
     }
     case 1: // Is "this" null? [use kArg1]
-      cg->GenNullCheck(cu, info->args[0].s_reg_low, cg->TargetReg(kArg1), info->opt_flags);
+      cg->GenNullCheck(info->args[0].s_reg_low, cg->TargetReg(kArg1), info->opt_flags);
       // get this->klass_ [use kArg1, set kInvokeTgt]
-      cg->LoadWordDisp(cu, cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
+      cg->LoadWordDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
                        cg->TargetReg(kInvokeTgt));
       break;
-    case 2: // Get this->klass_->vtable [usr kInvokeTgt, set kInvokeTgt]
+    case 2: // Get this->klass_->vtable [use kInvokeTgt, set kInvokeTgt]
-      cg->LoadWordDisp(cu, cg->TargetReg(kInvokeTgt), mirror::Class::VTableOffset().Int32Value(),
+      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), mirror::Class::VTableOffset().Int32Value(),
                        cg->TargetReg(kInvokeTgt));
       break;
     case 3: // Get target method [use kInvokeTgt, set kArg0]
-      cg->LoadWordDisp(cu, cg->TargetReg(kInvokeTgt), (method_idx * 4) +
+      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), (method_idx * 4) +
                        mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(),
                        cg->TargetReg(kArg0));
       break;
     case 4: // Get the compiled code address [uses kArg0, sets kInvokeTgt]
       if (cu->instruction_set != kX86) {
-        cg->LoadWordDisp(cu, cg->TargetReg(kArg0),
+        cg->LoadWordDisp(cg->TargetReg(kArg0),
                          mirror::AbstractMethod::GetCodeOffset().Int32Value(),
                          cg->TargetReg(kInvokeTgt));
         break;
@@ -464,7 +456,7 @@
                                  uint32_t dex_idx, uint32_t unused, uintptr_t unused2,
                                  uintptr_t direct_method, InvokeType unused4)
 {
-  Codegen* cg = cu->cg.get();
+  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
   if (cu->instruction_set != kThumb2) {
     // Disable sharpening
     direct_method = 0;
@@ -476,19 +468,19 @@
     switch (state) {
       case 0:  // Load the trampoline target [sets kInvokeTgt].
         if (cu->instruction_set != kX86) {
-          cg->LoadWordDisp(cu, cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt));
+          cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt));
         }
         // Get the interface Method* [sets kArg0]
         if (direct_method != static_cast<unsigned int>(-1)) {
-          cg->LoadConstant(cu, cg->TargetReg(kArg0), direct_method);
+          cg->LoadConstant(cg->TargetReg(kArg0), direct_method);
         } else {
-          LIR* data_target = ScanLiteralPool(cu->method_literal_list, dex_idx, 0);
+          LIR* data_target = cg->ScanLiteralPool(cg->method_literal_list_, dex_idx, 0);
           if (data_target == NULL) {
-            data_target = AddWordData(cu, &cu->method_literal_list, dex_idx);
+            data_target = cg->AddWordData(&cg->method_literal_list_, dex_idx);
             data_target->operands[1] = kInterface;
           }
-          LIR* load_pc_rel = cg->OpPcRelLoad(cu, cg->TargetReg(kArg0), data_target);
-          AppendLIR(cu, load_pc_rel);
+          LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kArg0), data_target);
+          cg->AppendLIR(load_pc_rel);
           DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
         }
         break;
@@ -499,19 +491,19 @@
     switch (state) {
       case 0:
         // Get the current Method* [sets kArg0] - TUNING: remove copy of method if it is promoted.
-        cg->LoadCurrMethodDirect(cu, cg->TargetReg(kArg0));
+        cg->LoadCurrMethodDirect(cg->TargetReg(kArg0));
         // Load the trampoline target [sets kInvokeTgt].
         if (cu->instruction_set != kX86) {
-          cg->LoadWordDisp(cu, cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt));
+          cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt));
         }
         break;
     case 1:  // Get method->dex_cache_resolved_methods_ [set/use kArg0]
-      cg->LoadWordDisp(cu, cg->TargetReg(kArg0),
+      cg->LoadWordDisp(cg->TargetReg(kArg0),
                        mirror::AbstractMethod::DexCacheResolvedMethodsOffset().Int32Value(),
                        cg->TargetReg(kArg0));
       break;
     case 2:  // Grab target method* [set/use kArg0]
-      cg->LoadWordDisp(cu, cg->TargetReg(kArg0),
+      cg->LoadWordDisp(cg->TargetReg(kArg0),
                        mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() + dex_idx * 4,
                        cg->TargetReg(kArg0));
       break;
@@ -525,7 +517,7 @@
 static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, int trampoline,
                             int state, uint32_t dex_idx, uint32_t method_idx)
 {
-  Codegen* cg = cu->cg.get();
+  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
   /*
    * This handles the case in which the base method is not fully
    * resolved at compile time, we bail to a runtime helper.
@@ -533,10 +525,10 @@
   if (state == 0) {
     if (cu->instruction_set != kX86) {
       // Load trampoline target
-      cg->LoadWordDisp(cu, cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt));
+      cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt));
     }
     // Load kArg0 with method index
-    cg->LoadConstant(cu, cg->TargetReg(kArg0), dex_idx);
+    cg->LoadConstant(cg->TargetReg(kArg0), dex_idx);
     return 1;
   }
   return -1;
@@ -585,14 +577,13 @@
   return NextInvokeInsnSP(cu, info, trampoline, state, dex_idx, 0);
 }
 
-static int LoadArgRegs(CompilationUnit* cu, CallInfo* info, int call_state,
-                       NextCallInsn next_call_insn, uint32_t dex_idx,
-                       uint32_t method_idx, uintptr_t direct_code,
-                       uintptr_t direct_method, InvokeType type, bool skip_this)
+int Mir2Lir::LoadArgRegs(CallInfo* info, int call_state,
+                         NextCallInsn next_call_insn, uint32_t dex_idx,
+                         uint32_t method_idx, uintptr_t direct_code,
+                         uintptr_t direct_method, InvokeType type, bool skip_this)
 {
-  Codegen* cg = cu->cg.get();
-  int last_arg_reg = cg->TargetReg(kArg3);
-  int next_reg = cg->TargetReg(kArg1);
+  int last_arg_reg = TargetReg(kArg3);
+  int next_reg = TargetReg(kArg1);
   int next_arg = 0;
   if (skip_this) {
     next_reg++;
@@ -600,9 +591,9 @@
   }
   for (; (next_reg <= last_arg_reg) && (next_arg < info->num_arg_words); next_reg++) {
     RegLocation rl_arg = info->args[next_arg++];
-    rl_arg = UpdateRawLoc(cu, rl_arg);
-    if (rl_arg.wide && (next_reg <= cg->TargetReg(kArg2))) {
-      cg->LoadValueDirectWideFixed(cu, rl_arg, next_reg, next_reg + 1);
+    rl_arg = UpdateRawLoc(rl_arg);
+    if (rl_arg.wide && (next_reg <= TargetReg(kArg2))) {
+      LoadValueDirectWideFixed(rl_arg, next_reg, next_reg + 1);
       next_reg++;
       next_arg++;
     } else {
@@ -610,9 +601,9 @@
         rl_arg.wide = false;
         rl_arg.is_const = false;
       }
-      cg->LoadValueDirectFixed(cu, rl_arg, next_reg);
+      LoadValueDirectFixed(rl_arg, next_reg);
     }
-    call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
+    call_state = next_call_insn(cu_, info, call_state, dex_idx, method_idx,
                  direct_code, direct_method, type);
   }
   return call_state;
@@ -625,7 +616,7 @@
  * the target method pointer.  Note, this may also be called
  * for "range" variants if the number of arguments is 5 or fewer.
  */
-int Codegen::GenDalvikArgsNoRange(CompilationUnit* cu, CallInfo* info,
+int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
                                   int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
                                   uint32_t dex_idx, uint32_t method_idx, uintptr_t direct_code,
                                   uintptr_t direct_method, InvokeType type, bool skip_this)
@@ -636,7 +627,7 @@
   if (info->num_arg_words == 0)
     return call_state;
 
-  call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
+  call_state = next_call_insn(cu_, info, call_state, dex_idx, method_idx,
                            direct_code, direct_method, type);
 
   DCHECK_LE(info->num_arg_words, 5);
@@ -650,19 +641,19 @@
       rl_use2.wide) {
       int reg = -1;
       // Wide spans, we need the 2nd half of uses[2].
-      rl_arg = UpdateLocWide(cu, rl_use2);
+      rl_arg = UpdateLocWide(rl_use2);
       if (rl_arg.location == kLocPhysReg) {
         reg = rl_arg.high_reg;
       } else {
-        // kArg2 & rArg3 can safely be used here
+        // kArg2 & kArg3 can safely be used here
         reg = TargetReg(kArg3);
-        LoadWordDisp(cu, TargetReg(kSp), SRegOffset(cu, rl_arg.s_reg_low) + 4, reg);
-        call_state = next_call_insn(cu, info, call_state, dex_idx,
+        LoadWordDisp(TargetReg(kSp), SRegOffset(rl_arg.s_reg_low) + 4, reg);
+        call_state = next_call_insn(cu_, info, call_state, dex_idx,
                                  method_idx, direct_code, direct_method, type);
       }
-      StoreBaseDisp(cu, TargetReg(kSp), (next_use + 1) * 4, reg, kWord);
-      StoreBaseDisp(cu, TargetReg(kSp), 16 /* (3+1)*4 */, reg, kWord);
-      call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
+      StoreBaseDisp(TargetReg(kSp), (next_use + 1) * 4, reg, kWord);
+      StoreBaseDisp(TargetReg(kSp), 16 /* (3+1)*4 */, reg, kWord);
+      call_state = next_call_insn(cu_, info, call_state, dex_idx, method_idx,
                                direct_code, direct_method, type);
       next_use++;
     }
@@ -671,7 +662,7 @@
       int low_reg;
       int high_reg = -1;
       rl_arg = info->args[next_use];
-      rl_arg = UpdateRawLoc(cu, rl_arg);
+      rl_arg = UpdateRawLoc(rl_arg);
       if (rl_arg.location == kLocPhysReg) {
         low_reg = rl_arg.low_reg;
         high_reg = rl_arg.high_reg;
@@ -679,32 +670,32 @@
         low_reg = TargetReg(kArg2);
         if (rl_arg.wide) {
           high_reg = TargetReg(kArg3);
-          LoadValueDirectWideFixed(cu, rl_arg, low_reg, high_reg);
+          LoadValueDirectWideFixed(rl_arg, low_reg, high_reg);
         } else {
-          LoadValueDirectFixed(cu, rl_arg, low_reg);
+          LoadValueDirectFixed(rl_arg, low_reg);
         }
-        call_state = next_call_insn(cu, info, call_state, dex_idx,
+        call_state = next_call_insn(cu_, info, call_state, dex_idx,
                                  method_idx, direct_code, direct_method, type);
       }
       int outs_offset = (next_use + 1) * 4;
       if (rl_arg.wide) {
-        StoreBaseDispWide(cu, TargetReg(kSp), outs_offset, low_reg, high_reg);
+        StoreBaseDispWide(TargetReg(kSp), outs_offset, low_reg, high_reg);
         next_use += 2;
       } else {
-        StoreWordDisp(cu, TargetReg(kSp), outs_offset, low_reg);
+        StoreWordDisp(TargetReg(kSp), outs_offset, low_reg);
         next_use++;
       }
-      call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
+      call_state = next_call_insn(cu_, info, call_state, dex_idx, method_idx,
                                direct_code, direct_method, type);
     }
   }
 
-  call_state = LoadArgRegs(cu, info, call_state, next_call_insn,
+  call_state = LoadArgRegs(info, call_state, next_call_insn,
                           dex_idx, method_idx, direct_code, direct_method,
                           type, skip_this);
 
   if (pcrLabel) {
-    *pcrLabel = GenNullCheck(cu, info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
+    *pcrLabel = GenNullCheck(info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
   }
   return call_state;
 }
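
The NextCallInsn callback threaded through these helpers is still a plain function pointer rather than a Mir2Lir member, which is why it keeps taking cu_ explicitly even after this refactor. Below is a minimal sketch of the contract, with the signature inferred from the call sites rather than copied from the header:

    // Signature inferred from the call sites above; treat as illustrative.
    typedef int (*NextCallInsn)(CompilationUnit* cu, CallInfo* info, int state,
                                uint32_t dex_idx, uint32_t method_idx,
                                uintptr_t direct_code, uintptr_t direct_method,
                                InvokeType type);

    // Each call emits the next instruction of the invoke sequence and returns
    // the advanced state; GenInvoke later drains whatever is left with:
    //   while (call_state >= 0) {
    //     call_state = next_call_insn(cu_, info, call_state, ...);
    //   }
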
@@ -724,7 +715,7 @@
  *       Pass arg0, arg1 & arg2 in kArg1-kArg3
  *
  */
-int Codegen::GenDalvikArgsRange(CompilationUnit* cu, CallInfo* info, int call_state,
+int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
                                 LIR** pcrLabel, NextCallInsn next_call_insn, uint32_t dex_idx,
                                 uint32_t method_idx, uintptr_t direct_code, uintptr_t direct_method,
                                 InvokeType type, bool skip_this)
@@ -732,7 +723,7 @@
 
   // If we can treat it as non-range (Jumbo ops will use range form)
   if (info->num_arg_words <= 5)
-    return GenDalvikArgsNoRange(cu, info, call_state, pcrLabel,
+    return GenDalvikArgsNoRange(info, call_state, pcrLabel,
                                 next_call_insn, dex_idx, method_idx,
                                 direct_code, direct_method, type, skip_this);
   /*
@@ -745,98 +736,97 @@
   for (int next_arg = 0; next_arg < info->num_arg_words;) {
     RegLocation loc = info->args[next_arg];
     if (loc.wide) {
-      loc = UpdateLocWide(cu, loc);
+      loc = UpdateLocWide(loc);
       if ((next_arg >= 2) && (loc.location == kLocPhysReg)) {
-        StoreBaseDispWide(cu, TargetReg(kSp), SRegOffset(cu, loc.s_reg_low),
+        StoreBaseDispWide(TargetReg(kSp), SRegOffset(loc.s_reg_low),
                           loc.low_reg, loc.high_reg);
       }
       next_arg += 2;
     } else {
-      loc = UpdateLoc(cu, loc);
+      loc = UpdateLoc(loc);
       if ((next_arg >= 3) && (loc.location == kLocPhysReg)) {
-        StoreBaseDisp(cu, TargetReg(kSp), SRegOffset(cu, loc.s_reg_low),
+        StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low),
                       loc.low_reg, kWord);
       }
       next_arg++;
     }
   }
 
-  int start_offset = SRegOffset(cu, info->args[3].s_reg_low);
+  int start_offset = SRegOffset(info->args[3].s_reg_low);
   int outs_offset = 4 /* Method* */ + (3 * 4);
-  if (cu->instruction_set != kThumb2) {
+  if (cu_->instruction_set != kThumb2) {
     // Generate memcpy
-    OpRegRegImm(cu, kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
-    OpRegRegImm(cu, kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
-    CallRuntimeHelperRegRegImm(cu, ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
+    OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
+    OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
+    CallRuntimeHelperRegRegImm(ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
                                TargetReg(kArg1), (info->num_arg_words - 3) * 4, false);
   } else {
     if (info->num_arg_words >= 20) {
       // Generate memcpy
-      OpRegRegImm(cu, kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
-      OpRegRegImm(cu, kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
-      CallRuntimeHelperRegRegImm(cu, ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
+      OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
+      OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
+      CallRuntimeHelperRegRegImm(ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
                                  TargetReg(kArg1), (info->num_arg_words - 3) * 4, false);
     } else {
       // Use vldm/vstm pair using kArg3 as a temp
       int regs_left = std::min(info->num_arg_words - 3, 16);
-      call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
+      call_state = next_call_insn(cu_, info, call_state, dex_idx, method_idx,
                                direct_code, direct_method, type);
-      OpRegRegImm(cu, kOpAdd, TargetReg(kArg3), TargetReg(kSp), start_offset);
-      LIR* ld = OpVldm(cu, TargetReg(kArg3), regs_left);
+      OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), start_offset);
+      LIR* ld = OpVldm(TargetReg(kArg3), regs_left);
       //TUNING: loosen barrier
       ld->def_mask = ENCODE_ALL;
-      SetMemRefType(cu, ld, true /* is_load */, kDalvikReg);
-      call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
+      SetMemRefType(ld, true /* is_load */, kDalvikReg);
+      call_state = next_call_insn(cu_, info, call_state, dex_idx, method_idx,
                                direct_code, direct_method, type);
-      OpRegRegImm(cu, kOpAdd, TargetReg(kArg3), TargetReg(kSp), 4 /* Method* */ + (3 * 4));
-      call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
+      OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), 4 /* Method* */ + (3 * 4));
+      call_state = next_call_insn(cu_, info, call_state, dex_idx, method_idx,
                                direct_code, direct_method, type);
-      LIR* st = OpVstm(cu, TargetReg(kArg3), regs_left);
-      SetMemRefType(cu, st, false /* is_load */, kDalvikReg);
+      LIR* st = OpVstm(TargetReg(kArg3), regs_left);
+      SetMemRefType(st, false /* is_load */, kDalvikReg);
       st->def_mask = ENCODE_ALL;
-      call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
+      call_state = next_call_insn(cu_, info, call_state, dex_idx, method_idx,
                                direct_code, direct_method, type);
     }
   }
 
-  call_state = LoadArgRegs(cu, info, call_state, next_call_insn,
+  call_state = LoadArgRegs(info, call_state, next_call_insn,
                           dex_idx, method_idx, direct_code, direct_method,
                           type, skip_this);
 
-  call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
+  call_state = next_call_insn(cu_, info, call_state, dex_idx, method_idx,
                            direct_code, direct_method, type);
   if (pcrLabel) {
-    *pcrLabel = GenNullCheck(cu, info->args[0].s_reg_low, TargetReg(kArg1),
-                             info->opt_flags);
+    *pcrLabel = GenNullCheck(info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
   }
   return call_state;
 }
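
The range case chooses among three copy strategies for the outs area: the pMemcpy runtime helper on everything except Thumb2, the same helper on Thumb2 once 20 or more argument words are involved, and otherwise a vldm/vstm block move of at most 16 words staged through kArg3. A hedged sketch of that decision; the predicate name is illustrative and leans on the InstructionSet enum used above:

    // Illustrative predicate, not an actual ART helper.
    static bool UseMemcpyHelper(InstructionSet isa, int num_arg_words) {
      // vldm/vstm moves at most 16 words per pair and is only used on Thumb2.
      // The first three argument words travel in registers, which is where the
      // (num_arg_words - 3) * 4 byte count passed to pMemcpy comes from.
      return (isa != kThumb2) || (num_arg_words >= 20);
    }
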
 
-RegLocation Codegen::InlineTarget(CompilationUnit* cu, CallInfo* info)
+RegLocation Mir2Lir::InlineTarget(CallInfo* info)
 {
   RegLocation res;
   if (info->result.location == kLocInvalid) {
-    res = GetReturn(cu, false);
+    res = GetReturn(false);
   } else {
     res = info->result;
   }
   return res;
 }
 
-RegLocation Codegen::InlineTargetWide(CompilationUnit* cu, CallInfo* info)
+RegLocation Mir2Lir::InlineTargetWide(CallInfo* info)
 {
   RegLocation res;
   if (info->result.location == kLocInvalid) {
-    res = GetReturnWide(cu, false);
+    res = GetReturnWide(false);
   } else {
     res = info->result;
   }
   return res;
 }
 
-bool Codegen::GenInlinedCharAt(CompilationUnit* cu, CallInfo* info)
+bool Mir2Lir::GenInlinedCharAt(CallInfo* info)
 {
-  if (cu->instruction_set == kMips) {
+  if (cu_->instruction_set == kMips) {
     // TODO - add Mips implementation
     return false;
   }
@@ -851,59 +841,59 @@
 
   RegLocation rl_obj = info->args[0];
   RegLocation rl_idx = info->args[1];
-  rl_obj = LoadValue(cu, rl_obj, kCoreReg);
-  rl_idx = LoadValue(cu, rl_idx, kCoreReg);
+  rl_obj = LoadValue(rl_obj, kCoreReg);
+  rl_idx = LoadValue(rl_idx, kCoreReg);
   int reg_max;
-  GenNullCheck(cu, rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags);
+  GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags);
   bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
   LIR* launch_pad = NULL;
   int reg_off = INVALID_REG;
   int reg_ptr = INVALID_REG;
-  if (cu->instruction_set != kX86) {
-    reg_off = AllocTemp(cu);
-    reg_ptr = AllocTemp(cu);
+  if (cu_->instruction_set != kX86) {
+    reg_off = AllocTemp();
+    reg_ptr = AllocTemp();
     if (range_check) {
-      reg_max = AllocTemp(cu);
-      LoadWordDisp(cu, rl_obj.low_reg, count_offset, reg_max);
+      reg_max = AllocTemp();
+      LoadWordDisp(rl_obj.low_reg, count_offset, reg_max);
     }
-    LoadWordDisp(cu, rl_obj.low_reg, offset_offset, reg_off);
-    LoadWordDisp(cu, rl_obj.low_reg, value_offset, reg_ptr);
+    LoadWordDisp(rl_obj.low_reg, offset_offset, reg_off);
+    LoadWordDisp(rl_obj.low_reg, value_offset, reg_ptr);
     if (range_check) {
       // Set up a launch pad to allow retry in case of bounds violation
-      launch_pad = RawLIR(cu, 0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
-      InsertGrowableList(cu, &cu->intrinsic_launchpads,
+      launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
+      InsertGrowableList(cu_, &intrinsic_launchpads_,
                             reinterpret_cast<uintptr_t>(launch_pad));
-      OpRegReg(cu, kOpCmp, rl_idx.low_reg, reg_max);
-      FreeTemp(cu, reg_max);
-      OpCondBranch(cu, kCondCs, launch_pad);
+      OpRegReg(kOpCmp, rl_idx.low_reg, reg_max);
+      FreeTemp(reg_max);
+      OpCondBranch(kCondCs, launch_pad);
     }
   } else {
     if (range_check) {
-      reg_max = AllocTemp(cu);
-      LoadWordDisp(cu, rl_obj.low_reg, count_offset, reg_max);
+      reg_max = AllocTemp();
+      LoadWordDisp(rl_obj.low_reg, count_offset, reg_max);
       // Set up a launch pad to allow retry in case of bounds violation
-      launch_pad = RawLIR(cu, 0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
-      InsertGrowableList(cu, &cu->intrinsic_launchpads,
+      launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
+      InsertGrowableList(cu_, &intrinsic_launchpads_,
                             reinterpret_cast<uintptr_t>(launch_pad));
-      OpRegReg(cu, kOpCmp, rl_idx.low_reg, reg_max);
-      FreeTemp(cu, reg_max);
-      OpCondBranch(cu, kCondCc, launch_pad);
+      OpRegReg(kOpCmp, rl_idx.low_reg, reg_max);
+      FreeTemp(reg_max);
+      OpCondBranch(kCondCc, launch_pad);
     }
-    reg_off = AllocTemp(cu);
-    reg_ptr = AllocTemp(cu);
-    LoadWordDisp(cu, rl_obj.low_reg, offset_offset, reg_off);
-    LoadWordDisp(cu, rl_obj.low_reg, value_offset, reg_ptr);
+    reg_off = AllocTemp();
+    reg_ptr = AllocTemp();
+    LoadWordDisp(rl_obj.low_reg, offset_offset, reg_off);
+    LoadWordDisp(rl_obj.low_reg, value_offset, reg_ptr);
   }
-  OpRegImm(cu, kOpAdd, reg_ptr, data_offset);
-  OpRegReg(cu, kOpAdd, reg_off, rl_idx.low_reg);
-  FreeTemp(cu, rl_obj.low_reg);
-  FreeTemp(cu, rl_idx.low_reg);
-  RegLocation rl_dest = InlineTarget(cu, info);
-  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-  LoadBaseIndexed(cu, reg_ptr, reg_off, rl_result.low_reg, 1, kUnsignedHalf);
-  FreeTemp(cu, reg_off);
-  FreeTemp(cu, reg_ptr);
-  StoreValue(cu, rl_dest, rl_result);
+  OpRegImm(kOpAdd, reg_ptr, data_offset);
+  OpRegReg(kOpAdd, reg_off, rl_idx.low_reg);
+  FreeTemp(rl_obj.low_reg);
+  FreeTemp(rl_idx.low_reg);
+  RegLocation rl_dest = InlineTarget(info);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  LoadBaseIndexed(reg_ptr, reg_off, rl_result.low_reg, 1, kUnsignedHalf);
+  FreeTemp(reg_off);
+  FreeTemp(reg_ptr);
+  StoreValue(rl_dest, rl_result);
   if (range_check) {
     launch_pad->operands[2] = 0;  // no resumption
   }
@@ -913,118 +903,117 @@
 }
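
Once the three String fields are in registers, the inlined charAt reduces to a single scaled halfword load. A hedged model of the address math, with the payload pointer standing for value already biased by data_offset:

    #include <cstdint>

    // reg_ptr = value + data_offset, reg_off = offset + idx; the final
    // LoadBaseIndexed uses scale 1 and size kUnsignedHalf, i.e.:
    static inline uint16_t InlinedCharAt(const uint16_t* payload,
                                         int32_t string_offset, int32_t idx) {
      return payload[string_offset + idx];
    }
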
 
 // Generates an inlined String.isEmpty() or String.length().
-bool Codegen::GenInlinedStringIsEmptyOrLength(CompilationUnit* cu, CallInfo* info, bool is_empty)
+bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty)
 {
-  if (cu->instruction_set == kMips) {
+  if (cu_->instruction_set == kMips) {
     // TODO - add Mips implementation
     return false;
   }
   // dst = src.length();
   RegLocation rl_obj = info->args[0];
-  rl_obj = LoadValue(cu, rl_obj, kCoreReg);
-  RegLocation rl_dest = InlineTarget(cu, info);
-  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-  GenNullCheck(cu, rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags);
-  LoadWordDisp(cu, rl_obj.low_reg, mirror::String::CountOffset().Int32Value(),
-               rl_result.low_reg);
+  rl_obj = LoadValue(rl_obj, kCoreReg);
+  RegLocation rl_dest = InlineTarget(info);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags);
+  LoadWordDisp(rl_obj.low_reg, mirror::String::CountOffset().Int32Value(), rl_result.low_reg);
   if (is_empty) {
     // dst = (dst == 0);
-    if (cu->instruction_set == kThumb2) {
-      int t_reg = AllocTemp(cu);
-      OpRegReg(cu, kOpNeg, t_reg, rl_result.low_reg);
-      OpRegRegReg(cu, kOpAdc, rl_result.low_reg, rl_result.low_reg, t_reg);
+    if (cu_->instruction_set == kThumb2) {
+      int t_reg = AllocTemp();
+      OpRegReg(kOpNeg, t_reg, rl_result.low_reg);
+      OpRegRegReg(kOpAdc, rl_result.low_reg, rl_result.low_reg, t_reg);
     } else {
-      DCHECK_EQ(cu->instruction_set, kX86);
-      OpRegImm(cu, kOpSub, rl_result.low_reg, 1);
-      OpRegImm(cu, kOpLsr, rl_result.low_reg, 31);
+      DCHECK_EQ(cu_->instruction_set, kX86);
+      OpRegImm(kOpSub, rl_result.low_reg, 1);
+      OpRegImm(kOpLsr, rl_result.low_reg, 31);
     }
   }
-  StoreValue(cu, rl_dest, rl_result);
+  StoreValue(rl_dest, rl_result);
   return true;
 }
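
Both isEmpty paths compute (length == 0) without a branch. A worked model of the two identities, assuming 32-bit unsigned arithmetic:

    #include <cstdint>

    // x86 path: subtract 1, then logical shift right by 31. The sign bit is
    // set only when length was 0, so the result is exactly 1 or 0.
    static inline uint32_t IsEmptyX86(uint32_t len) {
      return (len - 1u) >> 31;
    }

    // Thumb2 path: negs sets carry iff len == 0, and adc then computes
    // len + (-len) + carry, which is just the carry bit. The flag is modeled
    // explicitly here since C++ has no carry register.
    static inline uint32_t IsEmptyThumb2(uint32_t len) {
      uint32_t neg = 0u - len;
      uint32_t carry = (len == 0u) ? 1u : 0u;
      return len + neg + carry;
    }
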
 
-bool Codegen::GenInlinedAbsInt(CompilationUnit *cu, CallInfo* info)
+bool Mir2Lir::GenInlinedAbsInt(CallInfo* info)
 {
-  if (cu->instruction_set == kMips) {
+  if (cu_->instruction_set == kMips) {
     // TODO - add Mips implementation
     return false;
   }
   RegLocation rl_src = info->args[0];
-  rl_src = LoadValue(cu, rl_src, kCoreReg);
-  RegLocation rl_dest = InlineTarget(cu, info);
-  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-  int sign_reg = AllocTemp(cu);
+  rl_src = LoadValue(rl_src, kCoreReg);
+  RegLocation rl_dest = InlineTarget(info);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  int sign_reg = AllocTemp();
   // abs(x) = y<=x>>31, (x+y)^y.
-  OpRegRegImm(cu, kOpAsr, sign_reg, rl_src.low_reg, 31);
-  OpRegRegReg(cu, kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg);
-  OpRegReg(cu, kOpXor, rl_result.low_reg, sign_reg);
-  StoreValue(cu, rl_dest, rl_result);
+  OpRegRegImm(kOpAsr, sign_reg, rl_src.low_reg, 31);
+  OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg);
+  OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
+  StoreValue(rl_dest, rl_result);
   return true;
 }
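
The sign-mask identity in the comment, written out: with y = x >> 31 (arithmetic shift), (x + y) ^ y negates x exactly when x is negative. The wide variant below applies the same mask to both halves, using add/adc so the carry propagates through the 64-bit addition.

    #include <cstdint>

    static inline int32_t AbsViaSignMask(int32_t x) {
      int32_t y = x >> 31;  // 0 for x >= 0, -1 for x < 0 (arithmetic shift)
      return (x + y) ^ y;   // x = -5: (-5 + -1) ^ -1 = -6 ^ -1 = 5
    }
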
 
-bool Codegen::GenInlinedAbsLong(CompilationUnit *cu, CallInfo* info)
+bool Mir2Lir::GenInlinedAbsLong(CallInfo* info)
 {
-  if (cu->instruction_set == kMips) {
+  if (cu_->instruction_set == kMips) {
     // TODO - add Mips implementation
     return false;
   }
-  if (cu->instruction_set == kThumb2) {
+  if (cu_->instruction_set == kThumb2) {
     RegLocation rl_src = info->args[0];
-    rl_src = LoadValueWide(cu, rl_src, kCoreReg);
-    RegLocation rl_dest = InlineTargetWide(cu, info);
-    RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-    int sign_reg = AllocTemp(cu);
+    rl_src = LoadValueWide(rl_src, kCoreReg);
+    RegLocation rl_dest = InlineTargetWide(info);
+    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+    int sign_reg = AllocTemp();
     // abs(x) = y<=x>>31, (x+y)^y.
-    OpRegRegImm(cu, kOpAsr, sign_reg, rl_src.high_reg, 31);
-    OpRegRegReg(cu, kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg);
-    OpRegRegReg(cu, kOpAdc, rl_result.high_reg, rl_src.high_reg, sign_reg);
-    OpRegReg(cu, kOpXor, rl_result.low_reg, sign_reg);
-    OpRegReg(cu, kOpXor, rl_result.high_reg, sign_reg);
-    StoreValueWide(cu, rl_dest, rl_result);
+    OpRegRegImm(kOpAsr, sign_reg, rl_src.high_reg, 31);
+    OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg);
+    OpRegRegReg(kOpAdc, rl_result.high_reg, rl_src.high_reg, sign_reg);
+    OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
+    OpRegReg(kOpXor, rl_result.high_reg, sign_reg);
+    StoreValueWide(rl_dest, rl_result);
     return true;
   } else {
-    DCHECK_EQ(cu->instruction_set, kX86);
+    DCHECK_EQ(cu_->instruction_set, kX86);
     // Reuse source registers to avoid running out of temps
     RegLocation rl_src = info->args[0];
-    rl_src = LoadValueWide(cu, rl_src, kCoreReg);
-    RegLocation rl_dest = InlineTargetWide(cu, info);
-    RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-    OpRegCopyWide(cu, rl_result.low_reg, rl_result.high_reg, rl_src.low_reg, rl_src.high_reg);
-    FreeTemp(cu, rl_src.low_reg);
-    FreeTemp(cu, rl_src.high_reg);
-    int sign_reg = AllocTemp(cu);
+    rl_src = LoadValueWide(rl_src, kCoreReg);
+    RegLocation rl_dest = InlineTargetWide(info);
+    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+    OpRegCopyWide(rl_result.low_reg, rl_result.high_reg, rl_src.low_reg, rl_src.high_reg);
+    FreeTemp(rl_src.low_reg);
+    FreeTemp(rl_src.high_reg);
+    int sign_reg = AllocTemp();
     // abs(x) = y<=x>>31, (x+y)^y.
-    OpRegRegImm(cu, kOpAsr, sign_reg, rl_result.high_reg, 31);
-    OpRegReg(cu, kOpAdd, rl_result.low_reg, sign_reg);
-    OpRegReg(cu, kOpAdc, rl_result.high_reg, sign_reg);
-    OpRegReg(cu, kOpXor, rl_result.low_reg, sign_reg);
-    OpRegReg(cu, kOpXor, rl_result.high_reg, sign_reg);
-    StoreValueWide(cu, rl_dest, rl_result);
+    OpRegRegImm(kOpAsr, sign_reg, rl_result.high_reg, 31);
+    OpRegReg(kOpAdd, rl_result.low_reg, sign_reg);
+    OpRegReg(kOpAdc, rl_result.high_reg, sign_reg);
+    OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
+    OpRegReg(kOpXor, rl_result.high_reg, sign_reg);
+    StoreValueWide(rl_dest, rl_result);
     return true;
   }
 }
 
-bool Codegen::GenInlinedFloatCvt(CompilationUnit *cu, CallInfo* info)
+bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info)
 {
-  if (cu->instruction_set == kMips) {
+  if (cu_->instruction_set == kMips) {
     // TODO - add Mips implementation
     return false;
   }
   RegLocation rl_src = info->args[0];
-  RegLocation rl_dest = InlineTarget(cu, info);
-  StoreValue(cu, rl_dest, rl_src);
+  RegLocation rl_dest = InlineTarget(info);
+  StoreValue(rl_dest, rl_src);
   return true;
 }
 
-bool Codegen::GenInlinedDoubleCvt(CompilationUnit *cu, CallInfo* info)
+bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info)
 {
-  if (cu->instruction_set == kMips) {
+  if (cu_->instruction_set == kMips) {
     // TODO - add Mips implementation
     return false;
   }
   RegLocation rl_src = info->args[0];
-  RegLocation rl_dest = InlineTargetWide(cu, info);
-  StoreValueWide(cu, rl_dest, rl_src);
+  RegLocation rl_dest = InlineTargetWide(info);
+  StoreValueWide(rl_dest, rl_src);
   return true;
 }
 
@@ -1032,14 +1021,14 @@
  * Fast String.indexOf(I) & (II).  Tests for the simple case of char <= 0xffff,
  * otherwise bails to standard library code.
  */
-bool Codegen::GenInlinedIndexOf(CompilationUnit* cu, CallInfo* info, bool zero_based)
+bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based)
 {
-  if (cu->instruction_set == kMips) {
+  if (cu_->instruction_set == kMips) {
     // TODO - add Mips implementation
     return false;
   }
-  ClobberCalleeSave(cu);
-  LockCallTemps(cu);  // Using fixed registers
+  ClobberCalleeSave();
+  LockCallTemps();  // Using fixed registers
   int reg_ptr = TargetReg(kArg0);
   int reg_char = TargetReg(kArg1);
   int reg_start = TargetReg(kArg2);
@@ -1047,89 +1036,89 @@
   RegLocation rl_obj = info->args[0];
   RegLocation rl_char = info->args[1];
   RegLocation rl_start = info->args[2];
-  LoadValueDirectFixed(cu, rl_obj, reg_ptr);
-  LoadValueDirectFixed(cu, rl_char, reg_char);
+  LoadValueDirectFixed(rl_obj, reg_ptr);
+  LoadValueDirectFixed(rl_char, reg_char);
   if (zero_based) {
-    LoadConstant(cu, reg_start, 0);
+    LoadConstant(reg_start, 0);
   } else {
-    LoadValueDirectFixed(cu, rl_start, reg_start);
+    LoadValueDirectFixed(rl_start, reg_start);
   }
-  int r_tgt = (cu->instruction_set != kX86) ? LoadHelper(cu, ENTRYPOINT_OFFSET(pIndexOf)) : 0;
-  GenNullCheck(cu, rl_obj.s_reg_low, reg_ptr, info->opt_flags);
-  LIR* launch_pad = RawLIR(cu, 0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
-  InsertGrowableList(cu, &cu->intrinsic_launchpads, reinterpret_cast<uintptr_t>(launch_pad));
-  OpCmpImmBranch(cu, kCondGt, reg_char, 0xFFFF, launch_pad);
+  int r_tgt = (cu_->instruction_set != kX86) ? LoadHelper(ENTRYPOINT_OFFSET(pIndexOf)) : 0;
+  GenNullCheck(rl_obj.s_reg_low, reg_ptr, info->opt_flags);
+  LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
+  InsertGrowableList(cu_, &intrinsic_launchpads_, reinterpret_cast<uintptr_t>(launch_pad));
+  OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, launch_pad);
   // NOTE: not a safepoint
-  if (cu->instruction_set != kX86) {
-    OpReg(cu, kOpBlx, r_tgt);
+  if (cu_->instruction_set != kX86) {
+    OpReg(kOpBlx, r_tgt);
   } else {
-    OpThreadMem(cu, kOpBlx, ENTRYPOINT_OFFSET(pIndexOf));
+    OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pIndexOf));
   }
-  LIR* resume_tgt = NewLIR0(cu, kPseudoTargetLabel);
+  LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
   launch_pad->operands[2] = reinterpret_cast<uintptr_t>(resume_tgt);
   // Record that we've already inlined & null checked
   info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
-  RegLocation rl_return = GetReturn(cu, false);
-  RegLocation rl_dest = InlineTarget(cu, info);
-  StoreValue(cu, rl_dest, rl_return);
+  RegLocation rl_return = GetReturn(false);
+  RegLocation rl_dest = InlineTarget(info);
+  StoreValue(rl_dest, rl_return);
   return true;
 }
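
The launch pad here is the generic intrinsic retry mechanism: the kPseudoIntrinsicRetry node carries the CallInfo* so the slow path can regenerate the original invoke, and its third operand records where inlined code resumes, or 0 when the helper never returns to it (as in compareTo below). An illustrative view of the two slots in play; the real data lives in the LIR operands[] array:

    // Illustrative only, not a real ART type.
    struct IntrinsicLaunchPadView {
      uintptr_t call_info;     // operands[0]: reinterpret_cast<uintptr_t>(info)
      uintptr_t resume_label;  // operands[2]: a kPseudoTargetLabel, or 0
    };
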
 
 /* Fast String.compareTo(Ljava/lang/String;)I. */
-bool Codegen::GenInlinedStringCompareTo(CompilationUnit* cu, CallInfo* info)
+bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info)
 {
-  if (cu->instruction_set == kMips) {
+  if (cu_->instruction_set == kMips) {
     // TODO - add Mips implementation
     return false;
   }
-  ClobberCalleeSave(cu);
-  LockCallTemps(cu);  // Using fixed registers
+  ClobberCalleeSave();
+  LockCallTemps();  // Using fixed registers
   int reg_this = TargetReg(kArg0);
   int reg_cmp = TargetReg(kArg1);
 
   RegLocation rl_this = info->args[0];
   RegLocation rl_cmp = info->args[1];
-  LoadValueDirectFixed(cu, rl_this, reg_this);
-  LoadValueDirectFixed(cu, rl_cmp, reg_cmp);
-  int r_tgt = (cu->instruction_set != kX86) ?
-      LoadHelper(cu, ENTRYPOINT_OFFSET(pStringCompareTo)) : 0;
-  GenNullCheck(cu, rl_this.s_reg_low, reg_this, info->opt_flags);
+  LoadValueDirectFixed(rl_this, reg_this);
+  LoadValueDirectFixed(rl_cmp, reg_cmp);
+  int r_tgt = (cu_->instruction_set != kX86) ?
+      LoadHelper(ENTRYPOINT_OFFSET(pStringCompareTo)) : 0;
+  GenNullCheck(rl_this.s_reg_low, reg_this, info->opt_flags);
   //TUNING: check if rl_cmp.s_reg_low is already null checked
-  LIR* launch_pad = RawLIR(cu, 0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
-  InsertGrowableList(cu, &cu->intrinsic_launchpads, reinterpret_cast<uintptr_t>(launch_pad));
-  OpCmpImmBranch(cu, kCondEq, reg_cmp, 0, launch_pad);
+  LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
+  InsertGrowableList(cu_, &intrinsic_launchpads_, reinterpret_cast<uintptr_t>(launch_pad));
+  OpCmpImmBranch(kCondEq, reg_cmp, 0, launch_pad);
   // NOTE: not a safepoint
-  if (cu->instruction_set != kX86) {
-    OpReg(cu, kOpBlx, r_tgt);
+  if (cu_->instruction_set != kX86) {
+    OpReg(kOpBlx, r_tgt);
   } else {
-    OpThreadMem(cu, kOpBlx, ENTRYPOINT_OFFSET(pStringCompareTo));
+    OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pStringCompareTo));
   }
   launch_pad->operands[2] = 0;  // No return possible
   // Record that we've already inlined & null checked
   info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
-  RegLocation rl_return = GetReturn(cu, false);
-  RegLocation rl_dest = InlineTarget(cu, info);
-  StoreValue(cu, rl_dest, rl_return);
+  RegLocation rl_return = GetReturn(false);
+  RegLocation rl_dest = InlineTarget(info);
+  StoreValue(rl_dest, rl_return);
   return true;
 }
 
-bool Codegen::GenInlinedCurrentThread(CompilationUnit* cu, CallInfo* info) {
-  RegLocation rl_dest = InlineTarget(cu, info);
-  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
+  RegLocation rl_dest = InlineTarget(info);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   int offset = Thread::PeerOffset().Int32Value();
-  if (cu->instruction_set == kThumb2 || cu->instruction_set == kMips) {
-    LoadWordDisp(cu, TargetReg(kSelf), offset, rl_result.low_reg);
+  if (cu_->instruction_set == kThumb2 || cu_->instruction_set == kMips) {
+    LoadWordDisp(TargetReg(kSelf), offset, rl_result.low_reg);
   } else {
-    CHECK(cu->instruction_set == kX86);
-    ((X86Codegen*)this)->OpRegThreadMem(cu, kOpMov, rl_result.low_reg, offset);
+    CHECK(cu_->instruction_set == kX86);
+    ((X86Mir2Lir*)this)->OpRegThreadMem(kOpMov, rl_result.low_reg, offset);
   }
-  StoreValue(cu, rl_dest, rl_result);
+  StoreValue(rl_dest, rl_result);
   return true;
 }
 
-bool Codegen::GenInlinedUnsafeGet(CompilationUnit* cu, CallInfo* info,
+bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
                                   bool is_long, bool is_volatile) {
-  if (cu->instruction_set == kMips) {
+  if (cu_->instruction_set == kMips) {
     // TODO - add Mips implementation
     return false;
   }
@@ -1137,27 +1126,27 @@
   RegLocation rl_src_obj = info->args[1];  // Object
   RegLocation rl_src_offset = info->args[2];  // long low
   rl_src_offset.wide = 0;  // ignore high half in info->args[3]
-  RegLocation rl_dest = InlineTarget(cu, info);  // result reg
+  RegLocation rl_dest = InlineTarget(info);  // result reg
   if (is_volatile) {
-    GenMemBarrier(cu, kLoadLoad);
+    GenMemBarrier(kLoadLoad);
   }
-  RegLocation rl_object = LoadValue(cu, rl_src_obj, kCoreReg);
-  RegLocation rl_offset = LoadValue(cu, rl_src_offset, kCoreReg);
-  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
+  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   if (is_long) {
-    OpRegReg(cu, kOpAdd, rl_object.low_reg, rl_offset.low_reg);
-    LoadBaseDispWide(cu, rl_object.low_reg, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
-    StoreValueWide(cu, rl_dest, rl_result);
+    OpRegReg(kOpAdd, rl_object.low_reg, rl_offset.low_reg);
+    LoadBaseDispWide(rl_object.low_reg, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
+    StoreValueWide(rl_dest, rl_result);
   } else {
-    LoadBaseIndexed(cu, rl_object.low_reg, rl_offset.low_reg, rl_result.low_reg, 0, kWord);
-    StoreValue(cu, rl_dest, rl_result);
+    LoadBaseIndexed(rl_object.low_reg, rl_offset.low_reg, rl_result.low_reg, 0, kWord);
+    StoreValue(rl_dest, rl_result);
   }
   return true;
 }
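
The non-long Unsafe get is a plain word load at obj + offset (LoadBaseIndexed with scale 0), preceded by the kLoadLoad barrier when the access is volatile. A hedged sketch of the access itself:

    #include <cstdint>

    static inline int32_t UnsafeGet32(const uint8_t* obj, int32_t offset) {
      // On the volatile path, a kLoadLoad barrier is emitted before this load.
      return *reinterpret_cast<const int32_t*>(obj + offset);
    }
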
 
-bool Codegen::GenInlinedUnsafePut(CompilationUnit* cu, CallInfo* info, bool is_long,
+bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
                                   bool is_object, bool is_volatile, bool is_ordered) {
-  if (cu->instruction_set == kMips) {
+  if (cu_->instruction_set == kMips) {
     // TODO - add Mips implementation
     return false;
   }
@@ -1167,27 +1156,27 @@
   rl_src_offset.wide = 0;  // ignore high half in info->args[3]
   RegLocation rl_src_value = info->args[4];  // value to store
   if (is_volatile || is_ordered) {
-    GenMemBarrier(cu, kStoreStore);
+    GenMemBarrier(kStoreStore);
   }
-  RegLocation rl_object = LoadValue(cu, rl_src_obj, kCoreReg);
-  RegLocation rl_offset = LoadValue(cu, rl_src_offset, kCoreReg);
-  RegLocation rl_value = LoadValue(cu, rl_src_value, kCoreReg);
+  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
+  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
+  RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
   if (is_long) {
-    OpRegReg(cu, kOpAdd, rl_object.low_reg, rl_offset.low_reg);
-    StoreBaseDispWide(cu, rl_object.low_reg, 0, rl_value.low_reg, rl_value.high_reg);
+    OpRegReg(kOpAdd, rl_object.low_reg, rl_offset.low_reg);
+    StoreBaseDispWide(rl_object.low_reg, 0, rl_value.low_reg, rl_value.high_reg);
   } else {
-    StoreBaseIndexed(cu, rl_object.low_reg, rl_offset.low_reg, rl_value.low_reg, 0, kWord);
+    StoreBaseIndexed(rl_object.low_reg, rl_offset.low_reg, rl_value.low_reg, 0, kWord);
   }
   if (is_volatile) {
-    GenMemBarrier(cu, kStoreLoad);
+    GenMemBarrier(kStoreLoad);
   }
   if (is_object) {
-    MarkGCCard(cu, rl_value.low_reg, rl_object.low_reg);
+    MarkGCCard(rl_value.low_reg, rl_object.low_reg);
   }
   return true;
 }
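
The put side brackets the store with barriers and card-marks object writes so the GC notices the new reference. A hedged outline, with stub functions standing in for GenMemBarrier and MarkGCCard:

    #include <cstdint>

    static void StoreStoreBarrier() {}  // stands in for GenMemBarrier(kStoreStore)
    static void StoreLoadBarrier() {}   // stands in for GenMemBarrier(kStoreLoad)
    static void MarkCard(uint8_t* obj) { (void)obj; }  // stands in for MarkGCCard

    static void UnsafePut32(uint8_t* obj, int32_t offset, int32_t value,
                            bool is_volatile, bool is_ordered, bool is_object) {
      if (is_volatile || is_ordered) StoreStoreBarrier();
      *reinterpret_cast<int32_t*>(obj + offset) = value;
      if (is_volatile) StoreLoadBarrier();
      if (is_object) MarkCard(obj);
    }
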
 
-bool Codegen::GenIntrinsic(CompilationUnit* cu, CallInfo* info)
+bool Mir2Lir::GenIntrinsic(CallInfo* info)
 {
   if (info->opt_flags & MIR_INLINED) {
     return false;
@@ -1202,129 +1191,129 @@
    * method.  By doing this during basic block construction, we can also
    * take advantage of/generate new useful dataflow info.
    */
-  std::string tgt_method(PrettyMethod(info->index, *cu->dex_file));
+  std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
   if (tgt_method.find(" java.lang") != std::string::npos) {
     if (tgt_method == "long java.lang.Double.doubleToRawLongBits(double)") {
-      return GenInlinedDoubleCvt(cu, info);
+      return GenInlinedDoubleCvt(info);
     }
     if (tgt_method == "double java.lang.Double.longBitsToDouble(long)") {
-      return GenInlinedDoubleCvt(cu, info);
+      return GenInlinedDoubleCvt(info);
     }
     if (tgt_method == "int java.lang.Float.float_to_raw_int_bits(float)") {
-      return GenInlinedFloatCvt(cu, info);
+      return GenInlinedFloatCvt(info);
     }
     if (tgt_method == "float java.lang.Float.intBitsToFloat(int)") {
-      return GenInlinedFloatCvt(cu, info);
+      return GenInlinedFloatCvt(info);
     }
     if (tgt_method == "int java.lang.Math.abs(int)" ||
         tgt_method == "int java.lang.StrictMath.abs(int)") {
-      return GenInlinedAbsInt(cu, info);
+      return GenInlinedAbsInt(info);
     }
     if (tgt_method == "long java.lang.Math.abs(long)" ||
         tgt_method == "long java.lang.StrictMath.abs(long)") {
-      return GenInlinedAbsLong(cu, info);
+      return GenInlinedAbsLong(info);
     }
     if (tgt_method == "int java.lang.Math.max(int, int)" ||
         tgt_method == "int java.lang.StrictMath.max(int, int)") {
-      return GenInlinedMinMaxInt(cu, info, false /* is_min */);
+      return GenInlinedMinMaxInt(info, false /* is_min */);
     }
     if (tgt_method == "int java.lang.Math.min(int, int)" ||
         tgt_method == "int java.lang.StrictMath.min(int, int)") {
-      return GenInlinedMinMaxInt(cu, info, true /* is_min */);
+      return GenInlinedMinMaxInt(info, true /* is_min */);
     }
     if (tgt_method == "double java.lang.Math.sqrt(double)" ||
         tgt_method == "double java.lang.StrictMath.sqrt(double)") {
-      return GenInlinedSqrt(cu, info);
+      return GenInlinedSqrt(info);
     }
     if (tgt_method == "char java.lang.String.charAt(int)") {
-      return GenInlinedCharAt(cu, info);
+      return GenInlinedCharAt(info);
     }
     if (tgt_method == "int java.lang.String.compareTo(java.lang.String)") {
-      return GenInlinedStringCompareTo(cu, info);
+      return GenInlinedStringCompareTo(info);
     }
     if (tgt_method == "boolean java.lang.String.is_empty()") {
-      return GenInlinedStringIsEmptyOrLength(cu, info, true /* is_empty */);
+      return GenInlinedStringIsEmptyOrLength(info, true /* is_empty */);
     }
     if (tgt_method == "int java.lang.String.index_of(int, int)") {
-      return GenInlinedIndexOf(cu, info, false /* base 0 */);
+      return GenInlinedIndexOf(info, false /* not zero based */);
     }
     if (tgt_method == "int java.lang.String.index_of(int)") {
-      return GenInlinedIndexOf(cu, info, true /* base 0 */);
+      return GenInlinedIndexOf(info, true /* zero based */);
     }
     if (tgt_method == "int java.lang.String.length()") {
-      return GenInlinedStringIsEmptyOrLength(cu, info, false /* is_empty */);
+      return GenInlinedStringIsEmptyOrLength(info, false /* is_empty */);
     }
     if (tgt_method == "java.lang.Thread java.lang.Thread.currentThread()") {
-      return GenInlinedCurrentThread(cu, info);
+      return GenInlinedCurrentThread(info);
     }
   } else if (tgt_method.find(" sun.misc.Unsafe") != std::string::npos) {
     if (tgt_method == "boolean sun.misc.Unsafe.compareAndSwapInt(java.lang.Object, long, int, int)") {
-      return GenInlinedCas32(cu, info, false);
+      return GenInlinedCas32(info, false);
     }
     if (tgt_method == "boolean sun.misc.Unsafe.compareAndSwapObject(java.lang.Object, long, java.lang.Object, java.lang.Object)") {
-      return GenInlinedCas32(cu, info, true);
+      return GenInlinedCas32(info, true);
     }
     if (tgt_method == "int sun.misc.Unsafe.getInt(java.lang.Object, long)") {
-      return GenInlinedUnsafeGet(cu, info, false /* is_long */, false /* is_volatile */);
+      return GenInlinedUnsafeGet(info, false /* is_long */, false /* is_volatile */);
     }
     if (tgt_method == "int sun.misc.Unsafe.getIntVolatile(java.lang.Object, long)") {
-      return GenInlinedUnsafeGet(cu, info, false /* is_long */, true /* is_volatile */);
+      return GenInlinedUnsafeGet(info, false /* is_long */, true /* is_volatile */);
     }
     if (tgt_method == "void sun.misc.Unsafe.putInt(java.lang.Object, long, int)") {
-      return GenInlinedUnsafePut(cu, info, false /* is_long */, false /* is_object */,
+      return GenInlinedUnsafePut(info, false /* is_long */, false /* is_object */,
                                  false /* is_volatile */, false /* is_ordered */);
     }
     if (tgt_method == "void sun.misc.Unsafe.putIntVolatile(java.lang.Object, long, int)") {
-      return GenInlinedUnsafePut(cu, info, false /* is_long */, false /* is_object */,
+      return GenInlinedUnsafePut(info, false /* is_long */, false /* is_object */,
                                  true /* is_volatile */, false /* is_ordered */);
     }
     if (tgt_method == "void sun.misc.Unsafe.putOrderedInt(java.lang.Object, long, int)") {
-      return GenInlinedUnsafePut(cu, info, false /* is_long */, false /* is_object */,
+      return GenInlinedUnsafePut(info, false /* is_long */, false /* is_object */,
                                  false /* is_volatile */, true /* is_ordered */);
     }
     if (tgt_method == "long sun.misc.Unsafe.getLong(java.lang.Object, long)") {
-      return GenInlinedUnsafeGet(cu, info, true /* is_long */, false /* is_volatile */);
+      return GenInlinedUnsafeGet(info, true /* is_long */, false /* is_volatile */);
     }
     if (tgt_method == "long sun.misc.Unsafe.getLongVolatile(java.lang.Object, long)") {
-      return GenInlinedUnsafeGet(cu, info, true /* is_long */, true /* is_volatile */);
+      return GenInlinedUnsafeGet(info, true /* is_long */, true /* is_volatile */);
     }
     if (tgt_method == "void sun.misc.Unsafe.putLong(java.lang.Object, long, long)") {
-      return GenInlinedUnsafePut(cu, info, true /* is_long */, false /* is_object */,
+      return GenInlinedUnsafePut(info, true /* is_long */, false /* is_object */,
                                  false /* is_volatile */, false /* is_ordered */);
     }
     if (tgt_method == "void sun.misc.Unsafe.putLongVolatile(java.lang.Object, long, long)") {
-      return GenInlinedUnsafePut(cu, info, true /* is_long */, false /* is_object */,
+      return GenInlinedUnsafePut(info, true /* is_long */, false /* is_object */,
                                  true /* is_volatile */, false /* is_ordered */);
     }
     if (tgt_method == "void sun.misc.Unsafe.putOrderedLong(java.lang.Object, long, long)") {
-      return GenInlinedUnsafePut(cu, info, true /* is_long */, false /* is_object */,
+      return GenInlinedUnsafePut(info, true /* is_long */, false /* is_object */,
                                  false /* is_volatile */, true /* is_ordered */);
     }
     if (tgt_method == "java.lang.Object sun.misc.Unsafe.getObject(java.lang.Object, long)") {
-      return GenInlinedUnsafeGet(cu, info, false /* is_long */, false /* is_volatile */);
+      return GenInlinedUnsafeGet(info, false /* is_long */, false /* is_volatile */);
     }
     if (tgt_method == "java.lang.Object sun.misc.Unsafe.getObjectVolatile(java.lang.Object, long)") {
-      return GenInlinedUnsafeGet(cu, info, false /* is_long */, true /* is_volatile */);
+      return GenInlinedUnsafeGet(info, false /* is_long */, true /* is_volatile */);
     }
     if (tgt_method == "void sun.misc.Unsafe.putObject(java.lang.Object, long, java.lang.Object)") {
-      return GenInlinedUnsafePut(cu, info, false /* is_long */, true /* is_object */,
+      return GenInlinedUnsafePut(info, false /* is_long */, true /* is_object */,
                                  false /* is_volatile */, false /* is_ordered */);
     }
     if (tgt_method == "void sun.misc.Unsafe.putObjectVolatile(java.lang.Object, long, java.lang.Object)") {
-      return GenInlinedUnsafePut(cu, info, false /* is_long */, true /* is_object */,
+      return GenInlinedUnsafePut(info, false /* is_long */, true /* is_object */,
                                  true /* is_volatile */, false /* is_ordered */);
     }
     if (tgt_method == "void sun.misc.Unsafe.putOrderedObject(java.lang.Object, long, java.lang.Object)") {
-      return GenInlinedUnsafePut(cu, info, false /* is_long */, true /* is_object */,
+      return GenInlinedUnsafePut(info, false /* is_long */, true /* is_object */,
                                  false /* is_volatile */, true /* is_ordered */);
     }
   }
   return false;
 }
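
Intrinsic recognition compares against the exact strings PrettyMethod() renders, e.g. "char java.lang.String.charAt(int)", so the literals must match the runtime's camelCase names character for character. The two find() calls act as a cheap prefilter before the exact compares; a minimal sketch of that gate:

    #include <string>

    // Mirrors the two substring gates above.
    static bool MightBeIntrinsic(const std::string& pretty_method) {
      return pretty_method.find(" java.lang") != std::string::npos ||
             pretty_method.find(" sun.misc.Unsafe") != std::string::npos;
    }
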
 
-void Codegen::GenInvoke(CompilationUnit* cu, CallInfo* info)
+void Mir2Lir::GenInvoke(CallInfo* info)
 {
-  if (GenIntrinsic(cu, info)) {
+  if (GenIntrinsic(info)) {
     return;
   }
   InvokeType original_type = info->type;  // avoiding mutation by ComputeInvokeInfo
@@ -1332,17 +1321,17 @@
   LIR* null_ck;
   LIR** p_null_ck = NULL;
   NextCallInsn next_call_insn;
-  FlushAllRegs(cu);  /* Everything to home location */
+  FlushAllRegs();  /* Everything to home location */
   // Explicit register usage
-  LockCallTemps(cu);
+  LockCallTemps();
 
   uint32_t dex_method_idx = info->index;
   int vtable_idx;
   uintptr_t direct_code;
   uintptr_t direct_method;
   bool skip_this;
-  bool fast_path = cu->compiler_driver->ComputeInvokeInfo(
-      dex_method_idx, cu->mir_graph->GetCurrentDexCompilationUnit(), info->type, vtable_idx,
+  bool fast_path = cu_->compiler_driver->ComputeInvokeInfo(
+      dex_method_idx, mir_graph_->GetCurrentDexCompilationUnit(), info->type, vtable_idx,
       direct_code, direct_method) && !SLOW_INVOKE_PATH;
   if (info->type == kInterface) {
     if (fast_path) {
@@ -1370,31 +1359,31 @@
     skip_this = fast_path;
   }
   if (!info->is_range) {
-    call_state = GenDalvikArgsNoRange(cu, info, call_state, p_null_ck,
+    call_state = GenDalvikArgsNoRange(info, call_state, p_null_ck,
                                      next_call_insn, dex_method_idx,
                                      vtable_idx, direct_code, direct_method,
                                      original_type, skip_this);
   } else {
-    call_state = GenDalvikArgsRange(cu, info, call_state, p_null_ck,
+    call_state = GenDalvikArgsRange(info, call_state, p_null_ck,
                                    next_call_insn, dex_method_idx, vtable_idx,
                                    direct_code, direct_method, original_type,
                                    skip_this);
   }
   // Finish up any of the call sequence not interleaved in arg loading
   while (call_state >= 0) {
-    call_state = next_call_insn(cu, info, call_state, dex_method_idx,
+    call_state = next_call_insn(cu_, info, call_state, dex_method_idx,
                              vtable_idx, direct_code, direct_method,
                              original_type);
   }
-  if (cu->enable_debug & (1 << kDebugDisplayMissingTargets)) {
-    GenShowTarget(cu);
+  if (cu_->enable_debug & (1 << kDebugDisplayMissingTargets)) {
+    GenShowTarget();
   }
   LIR* call_inst;
-  if (cu->instruction_set != kX86) {
-    call_inst = OpReg(cu, kOpBlx, TargetReg(kInvokeTgt));
+  if (cu_->instruction_set != kX86) {
+    call_inst = OpReg(kOpBlx, TargetReg(kInvokeTgt));
   } else {
     if (fast_path && info->type != kInterface) {
-      call_inst = OpMem(cu, kOpBlx, TargetReg(kArg0),
+      call_inst = OpMem(kOpBlx, TargetReg(kArg0),
                         mirror::AbstractMethod::GetCodeOffset().Int32Value());
     } else {
       int trampoline = 0;
@@ -1418,54 +1407,22 @@
       default:
         LOG(FATAL) << "Unexpected invoke type";
       }
-      call_inst = OpThreadMem(cu, kOpBlx, trampoline);
+      call_inst = OpThreadMem(kOpBlx, trampoline);
     }
   }
-  MarkSafepointPC(cu, call_inst);
+  MarkSafepointPC(call_inst);
 
-  ClobberCalleeSave(cu);
+  ClobberCalleeSave();
   if (info->result.location != kLocInvalid) {
     // We have a following MOVE_RESULT - do it now.
     if (info->result.wide) {
-      RegLocation ret_loc = GetReturnWide(cu, info->result.fp);
-      StoreValueWide(cu, info->result, ret_loc);
+      RegLocation ret_loc = GetReturnWide(info->result.fp);
+      StoreValueWide(info->result, ret_loc);
     } else {
-      RegLocation ret_loc = GetReturn(cu, info->result.fp);
-      StoreValue(cu, info->result, ret_loc);
+      RegLocation ret_loc = GetReturn(info->result.fp);
+      StoreValue(info->result, ret_loc);
     }
   }
 }
 
-/*
- * Build an array of location records for the incoming arguments.
- * Note: one location record per word of arguments, with dummy
- * high-word loc for wide arguments.  Also pull up any following
- * MOVE_RESULT and incorporate it into the invoke.
- */
-CallInfo* Codegen::NewMemCallInfo(CompilationUnit* cu, BasicBlock* bb, MIR* mir, InvokeType type,
-                                  bool is_range)
-{
-  CallInfo* info = static_cast<CallInfo*>(NewMem(cu, sizeof(CallInfo), true, kAllocMisc));
-  MIR* move_result_mir = cu->mir_graph->FindMoveResult(bb, mir);
-  if (move_result_mir == NULL) {
-    info->result.location = kLocInvalid;
-  } else {
-    info->result = GetRawDest(cu, move_result_mir);
-    move_result_mir->meta.original_opcode = move_result_mir->dalvikInsn.opcode;
-    move_result_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
-  }
-  info->num_arg_words = mir->ssa_rep->num_uses;
-  info->args = (info->num_arg_words == 0) ? NULL : static_cast<RegLocation*>
-      (NewMem(cu, sizeof(RegLocation) * info->num_arg_words, false, kAllocMisc));
-  for (int i = 0; i < info->num_arg_words; i++) {
-    info->args[i] = GetRawSrc(cu, mir, i);
-  }
-  info->opt_flags = mir->optimization_flags;
-  info->type = type;
-  info->is_range = is_range;
-  info->index = mir->dalvikInsn.vB;
-  info->offset = mir->offset;
-  return info;
-}
-
 }  // namespace art
diff --git a/src/compiler/dex/quick/gen_loadstore.cc b/src/compiler/dex/quick/gen_loadstore.cc
index 7e116fc..1cebd31 100644
--- a/src/compiler/dex/quick/gen_loadstore.cc
+++ b/src/compiler/dex/quick/gen_loadstore.cc
@@ -14,11 +14,9 @@
  * limitations under the License.
  */
 
-#include "compiler/dex/quick/codegen_util.h"
 #include "compiler/dex/compiler_ir.h"
 #include "compiler/dex/compiler_internals.h"
 #include "invoke_type.h"
-#include "ralloc_util.h"
 
 namespace art {
 
@@ -28,13 +26,13 @@
  * Load an immediate value into a fixed or temp register.  Target
  * register is clobbered, and marked in_use.
  */
-LIR* Codegen::LoadConstant(CompilationUnit* cu, int r_dest, int value)
+LIR* Mir2Lir::LoadConstant(int r_dest, int value)
 {
-  if (IsTemp(cu, r_dest)) {
-    Clobber(cu, r_dest);
-    MarkInUse(cu, r_dest);
+  if (IsTemp(r_dest)) {
+    Clobber(r_dest);
+    MarkInUse(r_dest);
   }
-  return LoadConstantNoClobber(cu, r_dest, value);
+  return LoadConstantNoClobber(r_dest, value);
 }
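
The Clobber/MarkInUse pair matters when the destination is a tracked temp: the tracker must forget the stale contents and pin the register until the caller releases it. A usage sketch, shown as comments because it depends on Mir2Lir instance state:

    //   int reg = AllocTemp();
    //   LoadConstant(reg, 0xFFFF);  // invalidates old contents, marks reg in_use
    //   ... emit uses of reg ...
    //   FreeTemp(reg);
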
 
 /*
@@ -42,54 +40,51 @@
  * promoted floating point register, also copy a zero into the int/ref identity of
  * that sreg.
  */
-void Codegen::Workaround7250540(CompilationUnit* cu, RegLocation rl_dest, int zero_reg)
+void Mir2Lir::Workaround7250540(RegLocation rl_dest, int zero_reg)
 {
   if (rl_dest.fp) {
-    int pmap_index = SRegToPMap(cu, rl_dest.s_reg_low);
-    if (cu->promotion_map[pmap_index].fp_location == kLocPhysReg) {
+    int pmap_index = SRegToPMap(rl_dest.s_reg_low);
+    if (promotion_map_[pmap_index].fp_location == kLocPhysReg) {
       // Now, determine if this vreg is ever used as a reference.  If not, we're done.
-      if (!cu->gen_bitcode) {
-        // TUNING: We no longer have this info for QuickGBC - assume the worst
-        bool used_as_reference = false;
-        int base_vreg = cu->mir_graph->SRegToVReg(rl_dest.s_reg_low);
-        for (int i = 0; !used_as_reference && (i < cu->mir_graph->GetNumSSARegs()); i++) {
-          if (cu->mir_graph->SRegToVReg(cu->reg_location[i].s_reg_low) == base_vreg) {
-            used_as_reference |= cu->reg_location[i].ref;
-          }
+      bool used_as_reference = false;
+      int base_vreg = mir_graph_->SRegToVReg(rl_dest.s_reg_low);
+      for (int i = 0; !used_as_reference && (i < mir_graph_->GetNumSSARegs()); i++) {
+        if (mir_graph_->SRegToVReg(mir_graph_->reg_location_[i].s_reg_low) == base_vreg) {
+          used_as_reference |= mir_graph_->reg_location_[i].ref;
         }
-        if (!used_as_reference) {
-          return;
-        }
+      }
+      if (!used_as_reference) {
+        return;
       }
       int temp_reg = zero_reg;
       if (temp_reg == INVALID_REG) {
-        temp_reg = AllocTemp(cu);
-        cu->cg->LoadConstant(cu, temp_reg, 0);
+        temp_reg = AllocTemp();
+        LoadConstant(temp_reg, 0);
       }
-      if (cu->promotion_map[pmap_index].core_location == kLocPhysReg) {
+      if (promotion_map_[pmap_index].core_location == kLocPhysReg) {
         // Promoted - just copy in a zero
-        OpRegCopy(cu, cu->promotion_map[pmap_index].core_reg, temp_reg);
+        OpRegCopy(promotion_map_[pmap_index].core_reg, temp_reg);
       } else {
         // Lives in the frame, need to store.
-        StoreBaseDisp(cu, TargetReg(kSp), SRegOffset(cu, rl_dest.s_reg_low), temp_reg, kWord);
+        StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), temp_reg, kWord);
       }
       if (zero_reg == INVALID_REG) {
-        FreeTemp(cu, temp_reg);
+        FreeTemp(temp_reg);
       }
     }
   }
 }
 
 /* Load a word at base + displacement.  Displacement must be word multiple */
-LIR* Codegen::LoadWordDisp(CompilationUnit* cu, int rBase, int displacement, int r_dest)
+LIR* Mir2Lir::LoadWordDisp(int rBase, int displacement, int r_dest)
 {
-  return LoadBaseDisp(cu, rBase, displacement, r_dest, kWord,
+  return LoadBaseDisp(rBase, displacement, r_dest, kWord,
                       INVALID_SREG);
 }
 
-LIR* Codegen::StoreWordDisp(CompilationUnit* cu, int rBase, int displacement, int r_src)
+LIR* Mir2Lir::StoreWordDisp(int rBase, int displacement, int r_src)
 {
-  return StoreBaseDisp(cu, rBase, displacement, r_src, kWord);
+  return StoreBaseDisp(rBase, displacement, r_src, kWord);
 }
 
 /*
@@ -97,17 +92,17 @@
  * using this routine, as it doesn't perform any bookkeeping regarding
  * register liveness.  That is the responsibility of the caller.
  */
-void Codegen::LoadValueDirect(CompilationUnit* cu, RegLocation rl_src, int r_dest)
+void Mir2Lir::LoadValueDirect(RegLocation rl_src, int r_dest)
 {
-  rl_src = UpdateLoc(cu, rl_src);
+  rl_src = UpdateLoc(rl_src);
   if (rl_src.location == kLocPhysReg) {
-    OpRegCopy(cu, r_dest, rl_src.low_reg);
-  } else if (IsInexpensiveConstant(cu, rl_src)) {
-    LoadConstantNoClobber(cu, r_dest, cu->mir_graph->ConstantValue(rl_src));
+    OpRegCopy(r_dest, rl_src.low_reg);
+  } else if (IsInexpensiveConstant(rl_src)) {
+    LoadConstantNoClobber(r_dest, mir_graph_->ConstantValue(rl_src));
   } else {
     DCHECK((rl_src.location == kLocDalvikFrame) ||
            (rl_src.location == kLocCompilerTemp));
-    LoadWordDisp(cu, TargetReg(kSp), SRegOffset(cu, rl_src.s_reg_low), r_dest);
+    LoadWordDisp(TargetReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest);
   }
 }
 
@@ -116,11 +111,11 @@
  * register.  Should be used when loading to a fixed register (for example,
  * loading arguments to an out-of-line call).
  */
-void Codegen::LoadValueDirectFixed(CompilationUnit* cu, RegLocation rl_src, int r_dest)
+void Mir2Lir::LoadValueDirectFixed(RegLocation rl_src, int r_dest)
 {
-  Clobber(cu, r_dest);
-  MarkInUse(cu, r_dest);
-  LoadValueDirect(cu, rl_src, r_dest);
+  Clobber(r_dest);
+  MarkInUse(r_dest);
+  LoadValueDirect(rl_src, r_dest);
 }
 
 /*
@@ -128,18 +123,18 @@
  * using this routine, as it doesn't perform any bookkeeping regarding
  * register liveness.  That is the responsibility of the caller.
  */
-void Codegen::LoadValueDirectWide(CompilationUnit* cu, RegLocation rl_src, int reg_lo,
+void Mir2Lir::LoadValueDirectWide(RegLocation rl_src, int reg_lo,
              int reg_hi)
 {
-  rl_src = UpdateLocWide(cu, rl_src);
+  rl_src = UpdateLocWide(rl_src);
   if (rl_src.location == kLocPhysReg) {
-    OpRegCopyWide(cu, reg_lo, reg_hi, rl_src.low_reg, rl_src.high_reg);
-  } else if (IsInexpensiveConstant(cu, rl_src)) {
-    LoadConstantWide(cu, reg_lo, reg_hi, cu->mir_graph->ConstantValueWide(rl_src));
+    OpRegCopyWide(reg_lo, reg_hi, rl_src.low_reg, rl_src.high_reg);
+  } else if (IsInexpensiveConstant(rl_src)) {
+    LoadConstantWide(reg_lo, reg_hi, mir_graph_->ConstantValueWide(rl_src));
   } else {
     DCHECK((rl_src.location == kLocDalvikFrame) ||
            (rl_src.location == kLocCompilerTemp));
-    LoadBaseDispWide(cu, TargetReg(kSp), SRegOffset(cu, rl_src.s_reg_low),
+    LoadBaseDispWide(TargetReg(kSp), SRegOffset(rl_src.s_reg_low),
                      reg_lo, reg_hi, INVALID_SREG);
   }
 }
@@ -149,28 +144,28 @@
  * registers.  Should be used when loading to fixed registers (for example,
  * loading arguments to an out-of-line call).
  */
-void Codegen::LoadValueDirectWideFixed(CompilationUnit* cu, RegLocation rl_src, int reg_lo,
+void Mir2Lir::LoadValueDirectWideFixed(RegLocation rl_src, int reg_lo,
                                        int reg_hi)
 {
-  Clobber(cu, reg_lo);
-  Clobber(cu, reg_hi);
-  MarkInUse(cu, reg_lo);
-  MarkInUse(cu, reg_hi);
-  LoadValueDirectWide(cu, rl_src, reg_lo, reg_hi);
+  Clobber(reg_lo);
+  Clobber(reg_hi);
+  MarkInUse(reg_lo);
+  MarkInUse(reg_hi);
+  LoadValueDirectWide(rl_src, reg_lo, reg_hi);
 }
 
-RegLocation Codegen::LoadValue(CompilationUnit* cu, RegLocation rl_src, RegisterClass op_kind)
+RegLocation Mir2Lir::LoadValue(RegLocation rl_src, RegisterClass op_kind)
 {
-  rl_src = EvalLoc(cu, rl_src, op_kind, false);
-  if (IsInexpensiveConstant(cu, rl_src) || rl_src.location != kLocPhysReg) {
-    LoadValueDirect(cu, rl_src, rl_src.low_reg);
+  rl_src = EvalLoc(rl_src, op_kind, false);
+  if (IsInexpensiveConstant(rl_src) || rl_src.location != kLocPhysReg) {
+    LoadValueDirect(rl_src, rl_src.low_reg);
     rl_src.location = kLocPhysReg;
-    MarkLive(cu, rl_src.low_reg, rl_src.s_reg_low);
+    MarkLive(rl_src.low_reg, rl_src.s_reg_low);
   }
   return rl_src;
 }
 
-void Codegen::StoreValue(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+void Mir2Lir::StoreValue(RegLocation rl_dest, RegLocation rl_src)
 {
   /*
    * Sanity checking - should never try to store to the same
@@ -178,68 +173,68 @@
    * without an intervening ClobberSReg().
    */
   if (kIsDebugBuild) {
-    DCHECK((cu->live_sreg == INVALID_SREG) ||
-           (rl_dest.s_reg_low != cu->live_sreg));
-    cu->live_sreg = rl_dest.s_reg_low;
+    DCHECK((live_sreg_ == INVALID_SREG) ||
+           (rl_dest.s_reg_low != live_sreg_));
+    live_sreg_ = rl_dest.s_reg_low;
   }
   LIR* def_start;
   LIR* def_end;
   DCHECK(!rl_dest.wide);
   DCHECK(!rl_src.wide);
-  rl_src = UpdateLoc(cu, rl_src);
-  rl_dest = UpdateLoc(cu, rl_dest);
+  rl_src = UpdateLoc(rl_src);
+  rl_dest = UpdateLoc(rl_dest);
   if (rl_src.location == kLocPhysReg) {
-    if (IsLive(cu, rl_src.low_reg) ||
-      IsPromoted(cu, rl_src.low_reg) ||
+    if (IsLive(rl_src.low_reg) ||
+      IsPromoted(rl_src.low_reg) ||
       (rl_dest.location == kLocPhysReg)) {
       // Src is live/promoted or Dest has assigned reg.
-      rl_dest = EvalLoc(cu, rl_dest, kAnyReg, false);
-      OpRegCopy(cu, rl_dest.low_reg, rl_src.low_reg);
+      rl_dest = EvalLoc(rl_dest, kAnyReg, false);
+      OpRegCopy(rl_dest.low_reg, rl_src.low_reg);
     } else {
       // Just re-assign the registers.  Dest gets Src's regs
       rl_dest.low_reg = rl_src.low_reg;
-      Clobber(cu, rl_src.low_reg);
+      Clobber(rl_src.low_reg);
     }
   } else {
     // Load Src either into promoted Dest or temps allocated for Dest
-    rl_dest = EvalLoc(cu, rl_dest, kAnyReg, false);
-    LoadValueDirect(cu, rl_src, rl_dest.low_reg);
+    rl_dest = EvalLoc(rl_dest, kAnyReg, false);
+    LoadValueDirect(rl_src, rl_dest.low_reg);
   }
 
   // Dest is now live and dirty (until/if we flush it to home location)
-  MarkLive(cu, rl_dest.low_reg, rl_dest.s_reg_low);
-  MarkDirty(cu, rl_dest);
+  MarkLive(rl_dest.low_reg, rl_dest.s_reg_low);
+  MarkDirty(rl_dest);
 
 
-  ResetDefLoc(cu, rl_dest);
-  if (IsDirty(cu, rl_dest.low_reg) &&
-      oat_live_out(cu, rl_dest.s_reg_low)) {
-    def_start = cu->last_lir_insn;
-    StoreBaseDisp(cu, TargetReg(kSp), SRegOffset(cu, rl_dest.s_reg_low),
+  ResetDefLoc(rl_dest);
+  if (IsDirty(rl_dest.low_reg) &&
+      oat_live_out(rl_dest.s_reg_low)) {
+    def_start = last_lir_insn_;
+    StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low),
                   rl_dest.low_reg, kWord);
-    MarkClean(cu, rl_dest);
-    def_end = cu->last_lir_insn;
+    MarkClean(rl_dest);
+    def_end = last_lir_insn_;
     if (!rl_dest.ref) {
       // Exclude references from store elimination
-      MarkDef(cu, rl_dest, def_start, def_end);
+      MarkDef(rl_dest, def_start, def_end);
     }
   }
 }
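
The register handling in StoreValue reduces to a three-way choice ahead of the optional flush and MarkDef bracketing at the end. A compact model of that choice; illustrative, not the ART API:

    enum class StorePlan { kCopy, kStealSrcRegs, kLoadDirect };

    static StorePlan ChooseStorePlan(bool src_in_phys_reg,
                                     bool src_live_or_promoted,
                                     bool dest_has_phys_reg) {
      if (!src_in_phys_reg) return StorePlan::kLoadDirect;  // load into dest
      return (src_live_or_promoted || dest_has_phys_reg)
                 ? StorePlan::kCopy            // copy; src keeps its registers
                 : StorePlan::kStealSrcRegs;   // dest takes over src's registers
    }
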
 
-RegLocation Codegen::LoadValueWide(CompilationUnit* cu, RegLocation rl_src, RegisterClass op_kind)
+RegLocation Mir2Lir::LoadValueWide(RegLocation rl_src, RegisterClass op_kind)
 {
   DCHECK(rl_src.wide);
-  rl_src = EvalLoc(cu, rl_src, op_kind, false);
-  if (IsInexpensiveConstant(cu, rl_src) || rl_src.location != kLocPhysReg) {
-    LoadValueDirectWide(cu, rl_src, rl_src.low_reg, rl_src.high_reg);
+  rl_src = EvalLoc(rl_src, op_kind, false);
+  if (IsInexpensiveConstant(rl_src) || rl_src.location != kLocPhysReg) {
+    LoadValueDirectWide(rl_src, rl_src.low_reg, rl_src.high_reg);
     rl_src.location = kLocPhysReg;
-    MarkLive(cu, rl_src.low_reg, rl_src.s_reg_low);
-    MarkLive(cu, rl_src.high_reg, GetSRegHi(rl_src.s_reg_low));
+    MarkLive(rl_src.low_reg, rl_src.s_reg_low);
+    MarkLive(rl_src.high_reg, GetSRegHi(rl_src.s_reg_low));
   }
   return rl_src;
 }
 
-void Codegen::StoreValueWide(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+void Mir2Lir::StoreValueWide(RegLocation rl_dest, RegLocation rl_src)
 {
   /*
    * Sanity checking - should never try to store to the same
@@ -247,9 +242,9 @@
    * without an intervening ClobberSReg().
    */
   if (kIsDebugBuild) {
-    DCHECK((cu->live_sreg == INVALID_SREG) ||
-           (rl_dest.s_reg_low != cu->live_sreg));
-    cu->live_sreg = rl_dest.s_reg_low;
+    DCHECK((live_sreg_ == INVALID_SREG) ||
+           (rl_dest.s_reg_low != live_sreg_));
+    live_sreg_ = rl_dest.s_reg_low;
   }
   LIR* def_start;
   LIR* def_end;
@@ -257,60 +252,60 @@
   DCHECK(rl_dest.wide);
   DCHECK(rl_src.wide);
   if (rl_src.location == kLocPhysReg) {
-    if (IsLive(cu, rl_src.low_reg) ||
-        IsLive(cu, rl_src.high_reg) ||
-        IsPromoted(cu, rl_src.low_reg) ||
-        IsPromoted(cu, rl_src.high_reg) ||
+    if (IsLive(rl_src.low_reg) ||
+        IsLive(rl_src.high_reg) ||
+        IsPromoted(rl_src.low_reg) ||
+        IsPromoted(rl_src.high_reg) ||
         (rl_dest.location == kLocPhysReg)) {
       // Src is live or promoted or Dest has assigned reg.
-      rl_dest = EvalLoc(cu, rl_dest, kAnyReg, false);
-      OpRegCopyWide(cu, rl_dest.low_reg, rl_dest.high_reg,
+      rl_dest = EvalLoc(rl_dest, kAnyReg, false);
+      OpRegCopyWide(rl_dest.low_reg, rl_dest.high_reg,
                     rl_src.low_reg, rl_src.high_reg);
     } else {
       // Just re-assign the registers.  Dest gets Src's regs
       rl_dest.low_reg = rl_src.low_reg;
       rl_dest.high_reg = rl_src.high_reg;
-      Clobber(cu, rl_src.low_reg);
-      Clobber(cu, rl_src.high_reg);
+      Clobber(rl_src.low_reg);
+      Clobber(rl_src.high_reg);
     }
   } else {
     // Load Src either into promoted Dest or temps allocated for Dest
-    rl_dest = EvalLoc(cu, rl_dest, kAnyReg, false);
-    LoadValueDirectWide(cu, rl_src, rl_dest.low_reg, rl_dest.high_reg);
+    rl_dest = EvalLoc(rl_dest, kAnyReg, false);
+    LoadValueDirectWide(rl_src, rl_dest.low_reg, rl_dest.high_reg);
   }
 
   // Dest is now live and dirty (until/if we flush it to home location)
-  MarkLive(cu, rl_dest.low_reg, rl_dest.s_reg_low);
-  MarkLive(cu, rl_dest.high_reg, GetSRegHi(rl_dest.s_reg_low));
-  MarkDirty(cu, rl_dest);
-  MarkPair(cu, rl_dest.low_reg, rl_dest.high_reg);
+  MarkLive(rl_dest.low_reg, rl_dest.s_reg_low);
+  MarkLive(rl_dest.high_reg, GetSRegHi(rl_dest.s_reg_low));
+  MarkDirty(rl_dest);
+  MarkPair(rl_dest.low_reg, rl_dest.high_reg);
 
 
-  ResetDefLocWide(cu, rl_dest);
-  if ((IsDirty(cu, rl_dest.low_reg) ||
-      IsDirty(cu, rl_dest.high_reg)) &&
-      (oat_live_out(cu, rl_dest.s_reg_low) ||
-      oat_live_out(cu, GetSRegHi(rl_dest.s_reg_low)))) {
-    def_start = cu->last_lir_insn;
-    DCHECK_EQ((cu->mir_graph->SRegToVReg(rl_dest.s_reg_low)+1),
-              cu->mir_graph->SRegToVReg(GetSRegHi(rl_dest.s_reg_low)));
-    StoreBaseDispWide(cu, TargetReg(kSp), SRegOffset(cu, rl_dest.s_reg_low),
+  ResetDefLocWide(rl_dest);
+  if ((IsDirty(rl_dest.low_reg) ||
+      IsDirty(rl_dest.high_reg)) &&
+      (oat_live_out(rl_dest.s_reg_low) ||
+      oat_live_out(GetSRegHi(rl_dest.s_reg_low)))) {
+    def_start = last_lir_insn_;
+    DCHECK_EQ((mir_graph_->SRegToVReg(rl_dest.s_reg_low)+1),
+              mir_graph_->SRegToVReg(GetSRegHi(rl_dest.s_reg_low)));
+    StoreBaseDispWide(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low),
                       rl_dest.low_reg, rl_dest.high_reg);
-    MarkClean(cu, rl_dest);
-    def_end = cu->last_lir_insn;
-    MarkDefWide(cu, rl_dest, def_start, def_end);
+    MarkClean(rl_dest);
+    def_end = last_lir_insn_;
+    MarkDefWide(rl_dest, def_start, def_end);
   }
 }
 
 /* Utilities to load the current Method* */
-void Codegen::LoadCurrMethodDirect(CompilationUnit *cu, int r_tgt)
+void Mir2Lir::LoadCurrMethodDirect(int r_tgt)
 {
-  LoadValueDirectFixed(cu, cu->method_loc, r_tgt);
+  LoadValueDirectFixed(mir_graph_->GetMethodLoc(), r_tgt);
 }
 
-RegLocation Codegen::LoadCurrMethod(CompilationUnit *cu)
+RegLocation Mir2Lir::LoadCurrMethod()
 {
-  return LoadValue(cu, cu->method_loc, kCoreReg);
+  return LoadValue(mir_graph_->GetMethodLoc(), kCoreReg);
 }
 
 }  // namespace art
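The load/store helpers above share one protocol: after a store-value op the destination is marked live and dirty, and a store back to the Dalvik home location in the frame is emitted only when the value is both dirty and live-out, bracketed by def_start/def_end so later passes can eliminate it. A minimal standalone sketch of that decision (all names here are hypothetical, not the Mir2Lir API):

    // Sketch: the flush decision behind StoreValue/StoreValueWide.
    struct TrackedReg {
      bool live;   // register currently holds the value of its s_reg
      bool dirty;  // register copy differs from the in-frame home location
    };

    // MarkLive/MarkDirty, then flush only if skipping the store is observable.
    void StoreValueSketch(TrackedReg& r, bool live_out) {
      r.live = true;
      r.dirty = true;
      if (r.dirty && live_out) {   // IsDirty(...) && oat_live_out(...)
        // A StoreBaseDisp to the home frame slot would be emitted here.
        r.dirty = false;           // MarkClean: frame and register now agree
      }
    }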
diff --git a/src/compiler/dex/quick/local_optimizations.cc b/src/compiler/dex/quick/local_optimizations.cc
index 3c589373..695b12c 100644
--- a/src/compiler/dex/quick/local_optimizations.cc
+++ b/src/compiler/dex/quick/local_optimizations.cc
@@ -40,12 +40,11 @@
 }
 
 /* Convert a more expensive instruction (i.e. a load) into a move */
-static void ConvertMemOpIntoMove(CompilationUnit* cu, LIR* orig_lir, int dest, int src)
+void Mir2Lir::ConvertMemOpIntoMove(LIR* orig_lir, int dest, int src)
 {
-  Codegen* cg = cu->cg.get();
   /* Insert a move to replace the load */
   LIR* move_lir;
-  move_lir = cg->OpRegCopyNoInsert( cu, dest, src);
+  move_lir = OpRegCopyNoInsert(dest, src);
   /*
    * Insert the converted instruction after the original since the
   * optimization is scanning in the top-down order and the new instruction
@@ -73,9 +72,8 @@
  *   1) They are must-aliases
  *   2) The memory location is not written to in between
  */
-static void ApplyLoadStoreElimination(CompilationUnit* cu, LIR* head_lir, LIR* tail_lir)
+void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir)
 {
-  Codegen* cg = cu->cg.get();
   LIR* this_lir;
 
   if (head_lir == tail_lir) return;
@@ -86,7 +84,7 @@
 
     int sink_distance = 0;
 
-    uint64_t target_flags = cg->GetTargetInstFlags(this_lir->opcode);
+    uint64_t target_flags = GetTargetInstFlags(this_lir->opcode);
 
     /* Skip non-interesting instructions */
     if ((this_lir->flags.is_nop == true) ||
@@ -99,14 +97,14 @@
     }
 
     int native_reg_id;
-    if (cu->instruction_set == kX86) {
+    if (cu_->instruction_set == kX86) {
       // If x86, location differs depending on whether memory/reg operation.
-      native_reg_id = (cg->GetTargetInstFlags(this_lir->opcode) & IS_STORE) ? this_lir->operands[2]
+      native_reg_id = (GetTargetInstFlags(this_lir->opcode) & IS_STORE) ? this_lir->operands[2]
           : this_lir->operands[0];
     } else {
       native_reg_id = this_lir->operands[0];
     }
-    bool is_this_lir_load = cg->GetTargetInstFlags(this_lir->opcode) & IS_LOAD;
+    bool is_this_lir_load = GetTargetInstFlags(this_lir->opcode) & IS_LOAD;
     LIR* check_lir;
     /* Use the mem mask to determine the rough memory location */
     uint64_t this_mem_mask = (this_lir->use_mask | this_lir->def_mask) & ENCODE_MEM;
@@ -119,7 +117,7 @@
 
     uint64_t stop_def_reg_mask = this_lir->def_mask & ~ENCODE_MEM;
     uint64_t stop_use_reg_mask;
-    if (cu->instruction_set == kX86) {
+    if (cu_->instruction_set == kX86) {
       stop_use_reg_mask = (IS_BRANCH | this_lir->use_mask) & ~ENCODE_MEM;
     } else {
       /*
@@ -128,7 +126,7 @@
        * region bits since stop_mask is used to check data/control
        * dependencies.
        */
-        stop_use_reg_mask = (cg->GetPCUseDefEncoding() | this_lir->use_mask) & ~ENCODE_MEM;
+        stop_use_reg_mask = (GetPCUseDefEncoding() | this_lir->use_mask) & ~ENCODE_MEM;
     }
 
     for (check_lir = NEXT_LIR(this_lir); check_lir != tail_lir; check_lir = NEXT_LIR(check_lir)) {
@@ -146,7 +144,7 @@
       /*
        * Potential aliases seen - check the alias relations
        */
-      uint64_t check_flags = cg->GetTargetInstFlags(check_lir->opcode);
+      uint64_t check_flags = GetTargetInstFlags(check_lir->opcode);
       // TUNING: Support instructions with multiple register targets.
       if ((check_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1)) {
         stop_here = true;
@@ -160,14 +158,13 @@
           DCHECK(!(check_flags & IS_STORE));
           /* Same value && same register type */
           if (check_lir->alias_info == this_lir->alias_info &&
-              cg->SameRegType(check_lir->operands[0], native_reg_id)) {
+              SameRegType(check_lir->operands[0], native_reg_id)) {
             /*
              * Different destination register - insert
              * a move
              */
             if (check_lir->operands[0] != native_reg_id) {
-              ConvertMemOpIntoMove(cu, check_lir, check_lir->operands[0],
-                                   native_reg_id);
+              ConvertMemOpIntoMove(check_lir, check_lir->operands[0], native_reg_id);
             }
             check_lir->flags.is_nop = true;
           }
@@ -175,7 +172,7 @@
           /* Must alias */
           if (check_lir->alias_info == this_lir->alias_info) {
             /* Only optimize compatible registers */
-            bool reg_compatible = cg->SameRegType(check_lir->operands[0], native_reg_id);
+            bool reg_compatible = SameRegType(check_lir->operands[0], native_reg_id);
             if ((is_this_lir_load && is_check_lir_load) ||
                 (!is_this_lir_load && is_check_lir_load)) {
               /* RAR or RAW */
@@ -186,8 +183,7 @@
                  */
                 if (check_lir->operands[0] !=
                   native_reg_id) {
-                  ConvertMemOpIntoMove(cu, check_lir, check_lir->operands[0],
-                                       native_reg_id);
+                  ConvertMemOpIntoMove(check_lir, check_lir->operands[0], native_reg_id);
                 }
                 check_lir->flags.is_nop = true;
               } else {
@@ -237,10 +233,10 @@
       }
 
       if (stop_here == true) {
-        if (cu->instruction_set == kX86) {
+        if (cu_->instruction_set == kX86) {
           // Prevent stores from being sunk between ops that generate ccodes and
           // ops that use them.
-          uint64_t flags = cg->GetTargetInstFlags(check_lir->opcode);
+          uint64_t flags = GetTargetInstFlags(check_lir->opcode);
           if (sink_distance > 0 && (flags & IS_BRANCH) && (flags & USES_CCODES)) {
             check_lir = PREV_LIR(check_lir);
             sink_distance--;
@@ -249,7 +245,7 @@
         DEBUG_OPT(dump_dependent_insn_pair(this_lir, check_lir, "REG CLOBBERED"));
         /* Only sink store instructions */
         if (sink_distance && !is_this_lir_load) {
-          LIR* new_store_lir = static_cast<LIR*>(NewMem(cu, sizeof(LIR), true, kAllocLIR));
+          LIR* new_store_lir = static_cast<LIR*>(NewMem(cu_, sizeof(LIR), true, kAllocLIR));
           *new_store_lir = *this_lir;
           /*
            * Stop point found - insert *before* the check_lir
@@ -271,9 +267,8 @@
  * Perform a pass of bottom-up walk, from the second instruction in the
  * superblock, to try to hoist loads to earlier slots.
  */
-void ApplyLoadHoisting(CompilationUnit* cu, LIR* head_lir, LIR* tail_lir)
+void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir)
 {
-  Codegen* cg = cu->cg.get();
   LIR* this_lir, *check_lir;
   /*
    * Store the list of independent instructions that can be hoisted past.
@@ -289,7 +284,7 @@
 
     if (is_pseudo_opcode(this_lir->opcode)) continue;
 
-    uint64_t target_flags = cg->GetTargetInstFlags(this_lir->opcode);
+    uint64_t target_flags = GetTargetInstFlags(this_lir->opcode);
     /* Skip non-interesting instructions */
     if ((this_lir->flags.is_nop == true) ||
         ((target_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1)) ||
@@ -299,7 +294,7 @@
 
     uint64_t stop_use_all_mask = this_lir->use_mask;
 
-    if (cu->instruction_set != kX86) {
+    if (cu_->instruction_set != kX86) {
       /*
        * Branches for null/range checks are marked with the true resource
        * bits, and loads to Dalvik registers, constant pools, and non-alias
@@ -307,7 +302,7 @@
        * conservatively here.
        */
       if (stop_use_all_mask & ENCODE_HEAP_REF) {
-        stop_use_all_mask |= cg->GetPCUseDefEncoding();
+        stop_use_all_mask |= GetPCUseDefEncoding();
       }
     }
 
@@ -391,7 +386,7 @@
       LIR* dep_lir = prev_inst_list[next_slot-1];
       /* If there is a ld-ld dependency, wait LDLD_DISTANCE cycles */
       if (!is_pseudo_opcode(dep_lir->opcode) &&
-        (cg->GetTargetInstFlags(dep_lir->opcode) & IS_LOAD)) {
+        (GetTargetInstFlags(dep_lir->opcode) & IS_LOAD)) {
         first_slot -= LDLD_DISTANCE;
       }
       /*
@@ -408,7 +403,7 @@
            * If the first instruction is a load, don't hoist anything
            * above it since it is unlikely to be beneficial.
            */
-          if (cg->GetTargetInstFlags(cur_lir->opcode) & IS_LOAD) continue;
+          if (GetTargetInstFlags(cur_lir->opcode) & IS_LOAD) continue;
           /*
            * If the remaining number of slots is less than LD_LATENCY,
            * insert the hoisted load here.
@@ -428,7 +423,7 @@
          * the remaining instructions are less than LD_LATENCY.
          */
         bool prev_is_load = is_pseudo_opcode(prev_lir->opcode) ? false :
-            (cg->GetTargetInstFlags(prev_lir->opcode) & IS_LOAD);
+            (GetTargetInstFlags(prev_lir->opcode) & IS_LOAD);
         if (((cur_lir->use_mask & prev_lir->def_mask) && prev_is_load) || (slot < LD_LATENCY)) {
           break;
         }
@@ -437,7 +432,7 @@
       /* Found a slot to hoist to */
       if (slot >= 0) {
         LIR* cur_lir = prev_inst_list[slot];
-        LIR* new_load_lir = static_cast<LIR*>(NewMem(cu, sizeof(LIR), true, kAllocLIR));
+        LIR* new_load_lir = static_cast<LIR*>(NewMem(cu_, sizeof(LIR), true, kAllocLIR));
         *new_load_lir = *this_lir;
         /*
          * Insertion is guaranteed to succeed since check_lir
@@ -450,14 +445,13 @@
   }
 }
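ApplyLoadHoisting scans backwards from each eligible load, collecting prior instructions the load is independent of, then re-emits the load at the earliest profitable slot subject to LD_LATENCY and the ld-ld spacing. A much-simplified sketch of the test that bounds the backward walk (masks treated as plain bitsets; names hypothetical):

    #include <cstdint>

    // Sketch: 'prev' blocks hoisting when it defines anything the load
    // consumes; stop_use_all_mask is the load's use mask plus, off x86,
    // the conservative PC/heap bits added above.
    bool CanHoistAbove(uint64_t prev_def_mask, uint64_t stop_use_all_mask) {
      return (prev_def_mask & stop_use_all_mask) == 0;
    }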
 
-void ApplyLocalOptimizations(CompilationUnit* cu, LIR* head_lir,
-                    LIR* tail_lir)
+void Mir2Lir::ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir)
 {
-  if (!(cu->disable_opt & (1 << kLoadStoreElimination))) {
-    ApplyLoadStoreElimination(cu, head_lir, tail_lir);
+  if (!(cu_->disable_opt & (1 << kLoadStoreElimination))) {
+    ApplyLoadStoreElimination(head_lir, tail_lir);
   }
-  if (!(cu->disable_opt & (1 << kLoadHoisting))) {
-    ApplyLoadHoisting(cu, head_lir, tail_lir);
+  if (!(cu_->disable_opt & (1 << kLoadHoisting))) {
+    ApplyLoadHoisting(head_lir, tail_lir);
   }
 }
 
@@ -466,15 +460,14 @@
  * Note: new redundant branches may be inserted later, and we'll
  * use a check in final instruction assembly to nop those out.
  */
-void RemoveRedundantBranches(CompilationUnit* cu)
+void Mir2Lir::RemoveRedundantBranches()
 {
   LIR* this_lir;
-  Codegen* cg = cu->cg.get();
 
-  for (this_lir = cu->first_lir_insn; this_lir != cu->last_lir_insn; this_lir = NEXT_LIR(this_lir)) {
+  for (this_lir = first_lir_insn_; this_lir != last_lir_insn_; this_lir = NEXT_LIR(this_lir)) {
 
     /* Branch to the next instruction */
-    if (cg->IsUnconditionalBranch(this_lir)) {
+    if (IsUnconditionalBranch(this_lir)) {
       LIR* next_lir = this_lir;
 
       while (true) {
@@ -490,11 +483,11 @@
 
         /*
          * Found real useful stuff between the branch and the target.
-         * Need to explicitly check the last_lir_insn here because it
+         * Need to explicitly check the last_lir_insn_ here because it
          * might be the last real instruction.
          */
         if (!is_pseudo_opcode(next_lir->opcode) ||
-          (next_lir == cu->last_lir_insn))
+          (next_lir == last_lir_insn_))
           break;
       }
     }
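RemoveRedundantBranches nops out an unconditional branch whose target is reachable by simply falling through pseudo ops (labels). A rough standalone equivalent of the scan (list and field names simplified; only NEXT_LIR-style traversal is assumed):

    // Sketch: nop out branches that jump to the next real instruction.
    struct Insn {
      Insn* next;
      Insn* target;      // branch target, null for non-branches
      bool is_pseudo;    // labels and other non-emitting LIR
      bool is_nop;
    };

    void RemoveRedundantBranchesSketch(Insn* head) {
      for (Insn* insn = head; insn != nullptr; insn = insn->next) {
        if (insn->target == nullptr || insn->is_nop) continue;
        // Walk only through pseudo ops; a real instruction means the
        // branch actually skips useful work and must stay.
        for (Insn* n = insn->next; n != nullptr && n->is_pseudo; n = n->next) {
          if (n == insn->target) {  // falls through to its own target
            insn->is_nop = true;
            break;
          }
        }
      }
    }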
diff --git a/src/compiler/dex/quick/local_optimizations.h b/src/compiler/dex/quick/local_optimizations.h
deleted file mode 100644
index 3a376fe..0000000
--- a/src/compiler/dex/quick/local_optimizations.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_SRC_COMPILER_DEX_QUICK_LOCALOPTIMIZATIONS_H_
-#define ART_SRC_COMPILER_DEX_QUICK_LOCALOPTIMIZATIONS_H_
-
-namespace art {
-
-void ApplyLocalOptimizations(CompilationUnit* cu, LIR* head_lir, LIR* tail_lir);
-void RemoveRedundantBranches(CompilationUnit* cu);
-
-}  // namespace art
-
-#endif // ART_SRC_COMPILER_DEX_QUICK_LOCALOPTIMIZATIONS_H_
diff --git a/src/compiler/dex/quick/mips/assemble_mips.cc b/src/compiler/dex/quick/mips/assemble_mips.cc
index c5cd401..5223a0e 100644
--- a/src/compiler/dex/quick/mips/assemble_mips.cc
+++ b/src/compiler/dex/quick/mips/assemble_mips.cc
@@ -15,7 +15,6 @@
  */
 
 #include "codegen_mips.h"
-#include "compiler/dex/quick/codegen_util.h"
 #include "mips_lir.h"
 
 namespace art {
@@ -81,7 +80,7 @@
  * is expanded to include a nop.  This scheme should be replaced with
  * an assembler pass to fill those slots when possible.
  */
-const MipsEncodingMap MipsCodegen::EncodingMap[kMipsLast] = {
+const MipsEncodingMap MipsMir2Lir::EncodingMap[kMipsLast] = {
     ENCODING_MAP(kMips32BitData, 0x00000000,
                  kFmtBitBlt, 31, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_UNARY_OP,
@@ -457,7 +456,7 @@
  * NOTE: An out-of-range bal isn't supported because it should
  * never happen with the current PIC model.
  */
-static void ConvertShortToLongBranch(CompilationUnit* cu, LIR* lir)
+void MipsMir2Lir::ConvertShortToLongBranch(LIR* lir)
 {
   // For conditional branches we'll need to reverse the sense
   bool unconditional = false;
@@ -482,24 +481,24 @@
   }
   LIR* hop_target = NULL;
   if (!unconditional) {
-    hop_target = RawLIR(cu, dalvik_offset, kPseudoTargetLabel);
-    LIR* hop_branch = RawLIR(cu, dalvik_offset, opcode, lir->operands[0],
+    hop_target = RawLIR(dalvik_offset, kPseudoTargetLabel);
+    LIR* hop_branch = RawLIR(dalvik_offset, opcode, lir->operands[0],
                             lir->operands[1], 0, 0, 0, hop_target);
     InsertLIRBefore(lir, hop_branch);
   }
-  LIR* curr_pc = RawLIR(cu, dalvik_offset, kMipsCurrPC);
+  LIR* curr_pc = RawLIR(dalvik_offset, kMipsCurrPC);
   InsertLIRBefore(lir, curr_pc);
-  LIR* anchor = RawLIR(cu, dalvik_offset, kPseudoTargetLabel);
-  LIR* delta_hi = RawLIR(cu, dalvik_offset, kMipsDeltaHi, r_AT, 0,
+  LIR* anchor = RawLIR(dalvik_offset, kPseudoTargetLabel);
+  LIR* delta_hi = RawLIR(dalvik_offset, kMipsDeltaHi, r_AT, 0,
                         reinterpret_cast<uintptr_t>(anchor), 0, 0, lir->target);
   InsertLIRBefore(lir, delta_hi);
   InsertLIRBefore(lir, anchor);
-  LIR* delta_lo = RawLIR(cu, dalvik_offset, kMipsDeltaLo, r_AT, 0,
+  LIR* delta_lo = RawLIR(dalvik_offset, kMipsDeltaLo, r_AT, 0,
                         reinterpret_cast<uintptr_t>(anchor), 0, 0, lir->target);
   InsertLIRBefore(lir, delta_lo);
-  LIR* addu = RawLIR(cu, dalvik_offset, kMipsAddu, r_AT, r_AT, r_RA);
+  LIR* addu = RawLIR(dalvik_offset, kMipsAddu, r_AT, r_AT, r_RA);
   InsertLIRBefore(lir, addu);
-  LIR* jr = RawLIR(cu, dalvik_offset, kMipsJr, r_AT);
+  LIR* jr = RawLIR(dalvik_offset, kMipsJr, r_AT);
   InsertLIRBefore(lir, jr);
   if (!unconditional) {
     InsertLIRBefore(lir, hop_target);
@@ -513,12 +512,12 @@
  * instruction.  In those cases we will try to substitute a new code
  * sequence or request that the trace be shortened and retried.
  */
-AssemblerStatus MipsCodegen::AssembleInstructions(CompilationUnit *cu, uintptr_t start_addr)
+AssemblerStatus MipsMir2Lir::AssembleInstructions(uintptr_t start_addr)
 {
   LIR *lir;
   AssemblerStatus res = kSuccess;  // Assume success
 
-  for (lir = cu->first_lir_insn; lir != NULL; lir = NEXT_LIR(lir)) {
+  for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
     if (lir->opcode < 0) {
       continue;
     }
@@ -550,17 +549,17 @@
         } else {
           // Doesn't fit - must expand to kMipsDelta[Hi|Lo] pair
           LIR *new_delta_hi =
-              RawLIR(cu, lir->dalvik_offset, kMipsDeltaHi,
+              RawLIR(lir->dalvik_offset, kMipsDeltaHi,
                      lir->operands[0], 0, lir->operands[2],
                      lir->operands[3], 0, lir->target);
           InsertLIRBefore(lir, new_delta_hi);
           LIR *new_delta_lo =
-              RawLIR(cu, lir->dalvik_offset, kMipsDeltaLo,
+              RawLIR(lir->dalvik_offset, kMipsDeltaLo,
                      lir->operands[0], 0, lir->operands[2],
                      lir->operands[3], 0, lir->target);
           InsertLIRBefore(lir, new_delta_lo);
           LIR *new_addu =
-              RawLIR(cu, lir->dalvik_offset, kMipsAddu,
+              RawLIR(lir->dalvik_offset, kMipsAddu,
                      lir->operands[0], lir->operands[0], r_RA);
           InsertLIRBefore(lir, new_addu);
           lir->flags.is_nop = true;
@@ -588,7 +587,7 @@
         }
         if (delta > 131068 || delta < -131069) {
           res = kRetryAll;
-          ConvertShortToLongBranch(cu, lir);
+          ConvertShortToLongBranch(lir);
         } else {
           lir->operands[0] = delta >> 2;
         }
@@ -602,7 +601,7 @@
         }
         if (delta > 131068 || delta < -131069) {
           res = kRetryAll;
-          ConvertShortToLongBranch(cu, lir);
+          ConvertShortToLongBranch(lir);
         } else {
           lir->operands[1] = delta >> 2;
         }
@@ -616,7 +615,7 @@
         }
         if (delta > 131068 || delta < -131069) {
           res = kRetryAll;
-          ConvertShortToLongBranch(cu, lir);
+          ConvertShortToLongBranch(lir);
         } else {
           lir->operands[2] = delta >> 2;
         }
@@ -691,24 +690,24 @@
       }
     }
     // We only support little-endian MIPS.
-    cu->code_buffer.push_back(bits & 0xff);
-    cu->code_buffer.push_back((bits >> 8) & 0xff);
-    cu->code_buffer.push_back((bits >> 16) & 0xff);
-    cu->code_buffer.push_back((bits >> 24) & 0xff);
+    code_buffer_.push_back(bits & 0xff);
+    code_buffer_.push_back((bits >> 8) & 0xff);
+    code_buffer_.push_back((bits >> 16) & 0xff);
+    code_buffer_.push_back((bits >> 24) & 0xff);
     // TUNING: replace with proper delay slot handling
     if (encoder->size == 8) {
       const MipsEncodingMap *encoder = &EncodingMap[kMipsNop];
       uint32_t bits = encoder->skeleton;
-      cu->code_buffer.push_back(bits & 0xff);
-      cu->code_buffer.push_back((bits >> 8) & 0xff);
-      cu->code_buffer.push_back((bits >> 16) & 0xff);
-      cu->code_buffer.push_back((bits >> 24) & 0xff);
+      code_buffer_.push_back(bits & 0xff);
+      code_buffer_.push_back((bits >> 8) & 0xff);
+      code_buffer_.push_back((bits >> 16) & 0xff);
+      code_buffer_.push_back((bits >> 24) & 0xff);
     }
   }
   return res;
 }
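AssembleInstructions returns kRetryAll whenever a short-form branch proved out of range and was rewritten in place as a longer sequence, which invalidates every already-assigned offset. The caller is expected to re-layout and retry until assembly converges; a sketch of that driver (the helpers are hypothetical stand-ins):

    enum AssemblerStatus { kSuccess, kRetryAll };  // mirrors the real enum

    // Pretend the first pass expands one out-of-range branch.
    AssemblerStatus AssembleOnce(int pass) {
      return pass == 0 ? kRetryAll : kSuccess;
    }

    void AssembleUntilStable() {
      // Each retry must recompute all offsets and discard the partially
      // filled code buffer, since instruction sizes have changed.
      for (int pass = 0; AssembleOnce(pass) == kRetryAll; ++pass) {
        // ReassignOffsets() and code_buffer_.clear() would run here.
      }
    }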
 
-int MipsCodegen::GetInsnSize(LIR* lir)
+int MipsMir2Lir::GetInsnSize(LIR* lir)
 {
   return EncodingMap[lir->opcode].size;
 }
diff --git a/src/compiler/dex/quick/mips/call_mips.cc b/src/compiler/dex/quick/mips/call_mips.cc
index d7f9dce..f73e602 100644
--- a/src/compiler/dex/quick/mips/call_mips.cc
+++ b/src/compiler/dex/quick/mips/call_mips.cc
@@ -17,14 +17,12 @@
 /* This file contains codegen for the Mips ISA */
 
 #include "codegen_mips.h"
-#include "compiler/dex/quick/codegen_util.h"
-#include "compiler/dex/quick/ralloc_util.h"
 #include "mips_lir.h"
 #include "oat/runtime/oat_support_entrypoints.h"
 
 namespace art {
 
-void MipsCodegen::GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+void MipsMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir,
                                  SpecialCaseHandler special_case)
 {
     // TODO
@@ -61,22 +59,22 @@
  * done:
  *
  */
-void MipsCodegen::GenSparseSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
+void MipsMir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset,
                                   RegLocation rl_src)
 {
-  const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
-  if (cu->verbose) {
+  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+  if (cu_->verbose) {
     DumpSparseSwitchTable(table);
   }
   // Add the table to the list - we'll process it later
   SwitchTable *tab_rec =
-      static_cast<SwitchTable*>(NewMem(cu, sizeof(SwitchTable), true, kAllocData));
+      static_cast<SwitchTable*>(NewMem(cu_, sizeof(SwitchTable), true, kAllocData));
   tab_rec->table = table;
-  tab_rec->vaddr = cu->current_dalvik_offset;
+  tab_rec->vaddr = current_dalvik_offset_;
   int elements = table[1];
   tab_rec->targets =
-      static_cast<LIR**>(NewMem(cu, elements * sizeof(LIR*), true, kAllocLIR));
-  InsertGrowableList(cu, &cu->switch_tables, reinterpret_cast<uintptr_t>(tab_rec));
+      static_cast<LIR**>(NewMem(cu_, elements * sizeof(LIR*), true, kAllocLIR));
+  InsertGrowableList(cu_, &switch_tables_, reinterpret_cast<uintptr_t>(tab_rec));
 
   // The table is composed of 8-byte key/disp pairs
   int byte_size = elements * 8;
@@ -84,47 +82,47 @@
   int size_hi = byte_size >> 16;
   int size_lo = byte_size & 0xffff;
 
-  int rEnd = AllocTemp(cu);
+  int rEnd = AllocTemp();
   if (size_hi) {
-    NewLIR2(cu, kMipsLui, rEnd, size_hi);
+    NewLIR2(kMipsLui, rEnd, size_hi);
   }
   // Must prevent code motion for the curr pc pair
-  GenBarrier(cu);  // Scheduling barrier
-  NewLIR0(cu, kMipsCurrPC);  // Really a jal to .+8
+  GenBarrier();  // Scheduling barrier
+  NewLIR0(kMipsCurrPC);  // Really a jal to .+8
   // Now, fill the branch delay slot
   if (size_hi) {
-    NewLIR3(cu, kMipsOri, rEnd, rEnd, size_lo);
+    NewLIR3(kMipsOri, rEnd, rEnd, size_lo);
   } else {
-    NewLIR3(cu, kMipsOri, rEnd, r_ZERO, size_lo);
+    NewLIR3(kMipsOri, rEnd, r_ZERO, size_lo);
   }
-  GenBarrier(cu);  // Scheduling barrier
+  GenBarrier();  // Scheduling barrier
 
   // Construct BaseLabel and set up table base register
-  LIR* base_label = NewLIR0(cu, kPseudoTargetLabel);
+  LIR* base_label = NewLIR0(kPseudoTargetLabel);
   // Remember base label so offsets can be computed later
   tab_rec->anchor = base_label;
-  int rBase = AllocTemp(cu);
-  NewLIR4(cu, kMipsDelta, rBase, 0, reinterpret_cast<uintptr_t>(base_label),
+  int rBase = AllocTemp();
+  NewLIR4(kMipsDelta, rBase, 0, reinterpret_cast<uintptr_t>(base_label),
           reinterpret_cast<uintptr_t>(tab_rec));
-  OpRegRegReg(cu, kOpAdd, rEnd, rEnd, rBase);
+  OpRegRegReg(kOpAdd, rEnd, rEnd, rBase);
 
   // Grab switch test value
-  rl_src = LoadValue(cu, rl_src, kCoreReg);
+  rl_src = LoadValue(rl_src, kCoreReg);
 
   // Test loop
-  int r_key = AllocTemp(cu);
-  LIR* loop_label = NewLIR0(cu, kPseudoTargetLabel);
-  LIR* exit_branch = OpCmpBranch(cu , kCondEq, rBase, rEnd, NULL);
-  LoadWordDisp(cu, rBase, 0, r_key);
-  OpRegImm(cu, kOpAdd, rBase, 8);
-  OpCmpBranch(cu, kCondNe, rl_src.low_reg, r_key, loop_label);
-  int r_disp = AllocTemp(cu);
-  LoadWordDisp(cu, rBase, -4, r_disp);
-  OpRegRegReg(cu, kOpAdd, r_RA, r_RA, r_disp);
-  OpReg(cu, kOpBx, r_RA);
+  int r_key = AllocTemp();
+  LIR* loop_label = NewLIR0(kPseudoTargetLabel);
+  LIR* exit_branch = OpCmpBranch(kCondEq, rBase, rEnd, NULL);
+  LoadWordDisp(rBase, 0, r_key);
+  OpRegImm(kOpAdd, rBase, 8);
+  OpCmpBranch(kCondNe, rl_src.low_reg, r_key, loop_label);
+  int r_disp = AllocTemp();
+  LoadWordDisp(rBase, -4, r_disp);
+  OpRegRegReg(kOpAdd, r_RA, r_RA, r_disp);
+  OpReg(kOpBx, r_RA);
 
   // Loop exit
-  LIR* exit_label = NewLIR0(cu, kPseudoTargetLabel);
+  LIR* exit_label = NewLIR0(kPseudoTargetLabel);
   exit_branch->target = exit_label;
 }
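The emitted loop above is a linear scan over the 8-byte key/displacement pairs. In C++ terms the generated code behaves like this sketch (table layout taken from the comment above; names hypothetical):

    #include <cstdint>

    // Sketch: what the generated sparse-switch loop computes.
    struct KeyDisp { int32_t key; int32_t disp; };

    // Returns the displacement for 'value', or 0 to fall through; the
    // emitted code adds the result to the anchor pc held in RA.
    int32_t SparseSwitchLookup(const KeyDisp* table, int elements,
                               int32_t value) {
      for (int i = 0; i < elements; ++i) {
        if (table[i].key == value) {
          return table[i].disp;
        }
      }
      return 0;  // no case matched: continue at the fall-through label
    }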
 
@@ -141,24 +139,24 @@
  *   jr    r_RA
  * done:
  */
-void MipsCodegen::GenPackedSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
+void MipsMir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset,
                                   RegLocation rl_src)
 {
-  const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
-  if (cu->verbose) {
+  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+  if (cu_->verbose) {
     DumpPackedSwitchTable(table);
   }
   // Add the table to the list - we'll process it later
   SwitchTable *tab_rec =
-      static_cast<SwitchTable*>(NewMem(cu, sizeof(SwitchTable), true, kAllocData));
+      static_cast<SwitchTable*>(NewMem(cu_, sizeof(SwitchTable), true, kAllocData));
   tab_rec->table = table;
-  tab_rec->vaddr = cu->current_dalvik_offset;
+  tab_rec->vaddr = current_dalvik_offset_;
   int size = table[1];
-  tab_rec->targets = static_cast<LIR**>(NewMem(cu, size * sizeof(LIR*), true, kAllocLIR));
-  InsertGrowableList(cu, &cu->switch_tables, reinterpret_cast<uintptr_t>(tab_rec));
+  tab_rec->targets = static_cast<LIR**>(NewMem(cu_, size * sizeof(LIR*), true, kAllocLIR));
+  InsertGrowableList(cu_, &switch_tables_, reinterpret_cast<uintptr_t>(tab_rec));
 
   // Get the switch value
-  rl_src = LoadValue(cu, rl_src, kCoreReg);
+  rl_src = LoadValue(rl_src, kCoreReg);
 
   // Prepare the bias.  If too big, handle 1st stage here
   int low_key = s4FromSwitchData(&table[2]);
@@ -167,51 +165,51 @@
   if (low_key == 0) {
     r_key = rl_src.low_reg;
   } else if ((low_key & 0xffff) != low_key) {
-    r_key = AllocTemp(cu);
-    LoadConstant(cu, r_key, low_key);
+    r_key = AllocTemp();
+    LoadConstant(r_key, low_key);
     large_bias = true;
   } else {
-    r_key = AllocTemp(cu);
+    r_key = AllocTemp();
   }
 
   // Must prevent code motion for the curr pc pair
-  GenBarrier(cu);
-  NewLIR0(cu, kMipsCurrPC);  // Really a jal to .+8
+  GenBarrier();
+  NewLIR0(kMipsCurrPC);  // Really a jal to .+8
   // Now, fill the branch delay slot with bias strip
   if (low_key == 0) {
-    NewLIR0(cu, kMipsNop);
+    NewLIR0(kMipsNop);
   } else {
     if (large_bias) {
-      OpRegRegReg(cu, kOpSub, r_key, rl_src.low_reg, r_key);
+      OpRegRegReg(kOpSub, r_key, rl_src.low_reg, r_key);
     } else {
-      OpRegRegImm(cu, kOpSub, r_key, rl_src.low_reg, low_key);
+      OpRegRegImm(kOpSub, r_key, rl_src.low_reg, low_key);
     }
   }
-  GenBarrier(cu);  // Scheduling barrier
+  GenBarrier();  // Scheduling barrier
 
   // Construct BaseLabel and set up table base register
-  LIR* base_label = NewLIR0(cu, kPseudoTargetLabel);
+  LIR* base_label = NewLIR0(kPseudoTargetLabel);
   // Remember base label so offsets can be computed later
   tab_rec->anchor = base_label;
 
   // Bounds check - if < 0 or >= size continue following switch
-  LIR* branch_over = OpCmpImmBranch(cu, kCondHi, r_key, size-1, NULL);
+  LIR* branch_over = OpCmpImmBranch(kCondHi, r_key, size-1, NULL);
 
   // Materialize the table base pointer
-  int rBase = AllocTemp(cu);
-  NewLIR4(cu, kMipsDelta, rBase, 0, reinterpret_cast<uintptr_t>(base_label),
+  int rBase = AllocTemp();
+  NewLIR4(kMipsDelta, rBase, 0, reinterpret_cast<uintptr_t>(base_label),
           reinterpret_cast<uintptr_t>(tab_rec));
 
   // Load the displacement from the switch table
-  int r_disp = AllocTemp(cu);
-  LoadBaseIndexed(cu, rBase, r_key, r_disp, 2, kWord);
+  int r_disp = AllocTemp();
+  LoadBaseIndexed(rBase, r_key, r_disp, 2, kWord);
 
   // Add to r_RA and go
-  OpRegRegReg(cu, kOpAdd, r_RA, r_RA, r_disp);
-  OpReg(cu, kOpBx, r_RA);
+  OpRegRegReg(kOpAdd, r_RA, r_RA, r_disp);
+  OpReg(kOpBx, r_RA);
 
   /* branch_over target here */
-  LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+  LIR* target = NewLIR0(kPseudoTargetLabel);
   branch_over->target = target;
 }
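GenPackedSwitch indexes the table directly instead of scanning: it subtracts low_key, bounds-checks, and loads the word displacement at index*4 (the scale-2 LoadBaseIndexed). A sketch of the computation (hypothetical names):

    #include <cstdint>

    // Sketch: what the generated packed-switch sequence computes.
    // targets[] holds 'size' displacements for keys [low_key, low_key+size).
    int32_t PackedSwitchLookup(const int32_t* targets, int size,
                               int32_t low_key, int32_t value) {
      // One unsigned compare covers both "< 0" and ">= size", which is
      // exactly what the kCondHi branch_over above does.
      uint32_t index = static_cast<uint32_t>(value - low_key);
      if (index >= static_cast<uint32_t>(size)) {
        return 0;  // out of range: fall through past the switch
      }
      return targets[index];  // added to the anchor pc by the emitted code
    }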
 
@@ -225,165 +223,165 @@
  *
  * Total size is 4+(width * size + 1)/2 16-bit code units.
  */
-void MipsCodegen::GenFillArrayData(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
+void MipsMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src)
 {
-  const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
+  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
   // Add the table to the list - we'll process it later
   FillArrayData *tab_rec =
-      reinterpret_cast<FillArrayData*>(NewMem(cu, sizeof(FillArrayData), true, kAllocData));
+      reinterpret_cast<FillArrayData*>(NewMem(cu_, sizeof(FillArrayData), true, kAllocData));
   tab_rec->table = table;
-  tab_rec->vaddr = cu->current_dalvik_offset;
+  tab_rec->vaddr = current_dalvik_offset_;
   uint16_t width = tab_rec->table[1];
   uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
   tab_rec->size = (size * width) + 8;
 
-  InsertGrowableList(cu, &cu->fill_array_data, reinterpret_cast<uintptr_t>(tab_rec));
+  InsertGrowableList(cu_, &fill_array_data_, reinterpret_cast<uintptr_t>(tab_rec));
 
   // Making a call - use explicit registers
-  FlushAllRegs(cu);   /* Everything to home location */
-  LockCallTemps(cu);
-  LoadValueDirectFixed(cu, rl_src, rMIPS_ARG0);
+  FlushAllRegs();   /* Everything to home location */
+  LockCallTemps();
+  LoadValueDirectFixed(rl_src, rMIPS_ARG0);
 
   // Must prevent code motion for the curr pc pair
-  GenBarrier(cu);
-  NewLIR0(cu, kMipsCurrPC);  // Really a jal to .+8
+  GenBarrier();
+  NewLIR0(kMipsCurrPC);  // Really a jal to .+8
   // Now, fill the branch delay slot with the helper load
-  int r_tgt = LoadHelper(cu, ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode));
-  GenBarrier(cu);  // Scheduling barrier
+  int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode));
+  GenBarrier();  // Scheduling barrier
 
   // Construct BaseLabel and set up table base register
-  LIR* base_label = NewLIR0(cu, kPseudoTargetLabel);
+  LIR* base_label = NewLIR0(kPseudoTargetLabel);
 
   // Materialize a pointer to the fill data image
-  NewLIR4(cu, kMipsDelta, rMIPS_ARG1, 0, reinterpret_cast<uintptr_t>(base_label),
+  NewLIR4(kMipsDelta, rMIPS_ARG1, 0, reinterpret_cast<uintptr_t>(base_label),
           reinterpret_cast<uintptr_t>(tab_rec));
 
   // And go...
-  ClobberCalleeSave(cu);
-  LIR* call_inst = OpReg(cu, kOpBlx, r_tgt); // ( array*, fill_data* )
-  MarkSafepointPC(cu, call_inst);
+  ClobberCalleeSave();
+  LIR* call_inst = OpReg(kOpBlx, r_tgt); // ( array*, fill_data* )
+  MarkSafepointPC(call_inst);
 }
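For reference, the two size expressions above agree: tab_rec->size counts payload bytes plus the 8-byte table header, while the comment counts 16-bit code units with odd byte counts rounded up. A quick worked check (illustrative values, not code from the tree):

    #include <cassert>

    int main() {
      int width = 2, size = 5;                      // e.g. five short elements
      int bytes = size * width + 8;                 // tab_rec->size: data + header
      int code_units = 4 + (width * size + 1) / 2;  // formula from the comment
      // 9 code units = 18 bytes here; odd payloads round up by one byte.
      assert(code_units * 2 == bytes + (width * size) % 2);
      return 0;
    }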
 
 /*
  * TODO: implement fast path to short-circuit thin-lock case
  */
-void MipsCodegen::GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
+void MipsMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src)
 {
-  FlushAllRegs(cu);
-  LoadValueDirectFixed(cu, rl_src, rMIPS_ARG0);  // Get obj
-  LockCallTemps(cu);  // Prepare for explicit register usage
-  GenNullCheck(cu, rl_src.s_reg_low, rMIPS_ARG0, opt_flags);
+  FlushAllRegs();
+  LoadValueDirectFixed(rl_src, rMIPS_ARG0);  // Get obj
+  LockCallTemps();  // Prepare for explicit register usage
+  GenNullCheck(rl_src.s_reg_low, rMIPS_ARG0, opt_flags);
   // Go expensive route - artLockObjectFromCode(self, obj);
-  int r_tgt = LoadHelper(cu, ENTRYPOINT_OFFSET(pLockObjectFromCode));
-  ClobberCalleeSave(cu);
-  LIR* call_inst = OpReg(cu, kOpBlx, r_tgt);
-  MarkSafepointPC(cu, call_inst);
+  int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pLockObjectFromCode));
+  ClobberCalleeSave();
+  LIR* call_inst = OpReg(kOpBlx, r_tgt);
+  MarkSafepointPC(call_inst);
 }
 
 /*
  * TODO: implement fast path to short-circuit thin-lock case
  */
-void MipsCodegen::GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
+void MipsMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src)
 {
-  FlushAllRegs(cu);
-  LoadValueDirectFixed(cu, rl_src, rMIPS_ARG0);  // Get obj
-  LockCallTemps(cu);  // Prepare for explicit register usage
-  GenNullCheck(cu, rl_src.s_reg_low, rMIPS_ARG0, opt_flags);
+  FlushAllRegs();
+  LoadValueDirectFixed(rl_src, rMIPS_ARG0);  // Get obj
+  LockCallTemps();  // Prepare for explicit register usage
+  GenNullCheck(rl_src.s_reg_low, rMIPS_ARG0, opt_flags);
   // Go expensive route - UnlockObjectFromCode(obj);
-  int r_tgt = LoadHelper(cu, ENTRYPOINT_OFFSET(pUnlockObjectFromCode));
-  ClobberCalleeSave(cu);
-  LIR* call_inst = OpReg(cu, kOpBlx, r_tgt);
-  MarkSafepointPC(cu, call_inst);
+  int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pUnlockObjectFromCode));
+  ClobberCalleeSave();
+  LIR* call_inst = OpReg(kOpBlx, r_tgt);
+  MarkSafepointPC(call_inst);
 }
 
-void MipsCodegen::GenMoveException(CompilationUnit* cu, RegLocation rl_dest)
+void MipsMir2Lir::GenMoveException(RegLocation rl_dest)
 {
   int ex_offset = Thread::ExceptionOffset().Int32Value();
-  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-  int reset_reg = AllocTemp(cu);
-  LoadWordDisp(cu, rMIPS_SELF, ex_offset, rl_result.low_reg);
-  LoadConstant(cu, reset_reg, 0);
-  StoreWordDisp(cu, rMIPS_SELF, ex_offset, reset_reg);
-  FreeTemp(cu, reset_reg);
-  StoreValue(cu, rl_dest, rl_result);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  int reset_reg = AllocTemp();
+  LoadWordDisp(rMIPS_SELF, ex_offset, rl_result.low_reg);
+  LoadConstant(reset_reg, 0);
+  StoreWordDisp(rMIPS_SELF, ex_offset, reset_reg);
+  FreeTemp(reset_reg);
+  StoreValue(rl_dest, rl_result);
 }
 
 /*
  * Mark garbage collection card. Skip if the value we're storing is null.
  */
-void MipsCodegen::MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg)
+void MipsMir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg)
 {
-  int reg_card_base = AllocTemp(cu);
-  int reg_card_no = AllocTemp(cu);
-  LIR* branch_over = OpCmpImmBranch(cu, kCondEq, val_reg, 0, NULL);
-  LoadWordDisp(cu, rMIPS_SELF, Thread::CardTableOffset().Int32Value(), reg_card_base);
-  OpRegRegImm(cu, kOpLsr, reg_card_no, tgt_addr_reg, CardTable::kCardShift);
-  StoreBaseIndexed(cu, reg_card_base, reg_card_no, reg_card_base, 0,
+  int reg_card_base = AllocTemp();
+  int reg_card_no = AllocTemp();
+  LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
+  LoadWordDisp(rMIPS_SELF, Thread::CardTableOffset().Int32Value(), reg_card_base);
+  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, CardTable::kCardShift);
+  StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0,
                    kUnsignedByte);
-  LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+  LIR* target = NewLIR0(kPseudoTargetLabel);
   branch_over->target = target;
-  FreeTemp(cu, reg_card_base);
-  FreeTemp(cu, reg_card_no);
+  FreeTemp(reg_card_base);
+  FreeTemp(reg_card_no);
 }
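MarkGCCard computes the card address by shifting the stored-into address right by CardTable::kCardShift and indexing off the thread-local card table base; note that the StoreBaseIndexed above stores the base register itself, so the base's low byte doubles as the dirty value. A sketch of the same computation (the shift constant here is illustrative):

    #include <cstdint>

    constexpr int kCardShiftSketch = 7;  // hypothetical; real value is CardTable::kCardShift

    void MarkGCCardSketch(uint8_t* card_base, uintptr_t stored_into,
                          uintptr_t new_value) {
      if (new_value == 0) return;  // branch_over: null stores need no card mark
      card_base[stored_into >> kCardShiftSketch] =
          static_cast<uint8_t>(reinterpret_cast<uintptr_t>(card_base));
    }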
-void MipsCodegen::GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs, RegLocation rl_method)
+void MipsMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method)
 {
-  int spill_count = cu->num_core_spills + cu->num_fp_spills;
+  int spill_count = num_core_spills_ + num_fp_spills_;
   /*
    * On entry, rMIPS_ARG0, rMIPS_ARG1, rMIPS_ARG2 & rMIPS_ARG3 are live.  Let the register
    * allocation mechanism know so it doesn't try to use any of them when
    * expanding the frame or flushing.  This leaves the utility
    * code with a single temp: r12.  This should be enough.
    */
-  LockTemp(cu, rMIPS_ARG0);
-  LockTemp(cu, rMIPS_ARG1);
-  LockTemp(cu, rMIPS_ARG2);
-  LockTemp(cu, rMIPS_ARG3);
+  LockTemp(rMIPS_ARG0);
+  LockTemp(rMIPS_ARG1);
+  LockTemp(rMIPS_ARG2);
+  LockTemp(rMIPS_ARG3);
 
   /*
    * We can safely skip the stack overflow check if we're
    * a leaf *and* our frame size < fudge factor.
    */
-  bool skip_overflow_check = ((cu->attributes & METHOD_IS_LEAF) &&
-      (static_cast<size_t>(cu->frame_size) < Thread::kStackOverflowReservedBytes));
-  NewLIR0(cu, kPseudoMethodEntry);
-  int check_reg = AllocTemp(cu);
-  int new_sp = AllocTemp(cu);
+  bool skip_overflow_check = (mir_graph_->MethodIsLeaf() &&
+      (static_cast<size_t>(frame_size_) < Thread::kStackOverflowReservedBytes));
+  NewLIR0(kPseudoMethodEntry);
+  int check_reg = AllocTemp();
+  int new_sp = AllocTemp();
   if (!skip_overflow_check) {
     /* Load stack limit */
-    LoadWordDisp(cu, rMIPS_SELF, Thread::StackEndOffset().Int32Value(), check_reg);
+    LoadWordDisp(rMIPS_SELF, Thread::StackEndOffset().Int32Value(), check_reg);
   }
   /* Spill core callee saves */
-  SpillCoreRegs(cu);
+  SpillCoreRegs();
   /* NOTE: promotion of FP regs currently unsupported, thus no FP spill */
-  DCHECK_EQ(cu->num_fp_spills, 0);
+  DCHECK_EQ(num_fp_spills_, 0);
   if (!skip_overflow_check) {
-    OpRegRegImm(cu, kOpSub, new_sp, rMIPS_SP, cu->frame_size - (spill_count * 4));
-    GenRegRegCheck(cu, kCondCc, new_sp, check_reg, kThrowStackOverflow);
-    OpRegCopy(cu, rMIPS_SP, new_sp);     // Establish stack
+    OpRegRegImm(kOpSub, new_sp, rMIPS_SP, frame_size_ - (spill_count * 4));
+    GenRegRegCheck(kCondCc, new_sp, check_reg, kThrowStackOverflow);
+    OpRegCopy(rMIPS_SP, new_sp);     // Establish stack
   } else {
-    OpRegImm(cu, kOpSub, rMIPS_SP, cu->frame_size - (spill_count * 4));
+    OpRegImm(kOpSub, rMIPS_SP, frame_size_ - (spill_count * 4));
   }
 
-  FlushIns(cu, ArgLocs, rl_method);
+  FlushIns(ArgLocs, rl_method);
 
-  FreeTemp(cu, rMIPS_ARG0);
-  FreeTemp(cu, rMIPS_ARG1);
-  FreeTemp(cu, rMIPS_ARG2);
-  FreeTemp(cu, rMIPS_ARG3);
+  FreeTemp(rMIPS_ARG0);
+  FreeTemp(rMIPS_ARG1);
+  FreeTemp(rMIPS_ARG2);
+  FreeTemp(rMIPS_ARG3);
 }
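GenEntrySequence skips the explicit stack probe only when it can prove it unnecessary: a leaf method allocates nothing beyond its own frame, so if that frame fits inside the reserved guard region, the guard pages still catch any fault. The condition in sketch form (hypothetical names):

    #include <cstddef>

    // Sketch: the condition guarding the stack-overflow probe above.
    bool CanSkipOverflowCheck(bool is_leaf, size_t frame_size,
                              size_t reserved_bytes) {
      return is_leaf && frame_size < reserved_bytes;  // MethodIsLeaf() && frame < fudge
    }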
 
-void MipsCodegen::GenExitSequence(CompilationUnit* cu)
+void MipsMir2Lir::GenExitSequence()
 {
   /*
    * In the exit path, rMIPS_RET0/rMIPS_RET1 are live - make sure they aren't
    * allocated by the register utilities as temps.
    */
-  LockTemp(cu, rMIPS_RET0);
-  LockTemp(cu, rMIPS_RET1);
+  LockTemp(rMIPS_RET0);
+  LockTemp(rMIPS_RET1);
 
-  NewLIR0(cu, kPseudoMethodExit);
-  UnSpillCoreRegs(cu);
-  OpReg(cu, kOpBx, r_RA);
+  NewLIR0(kPseudoMethodExit);
+  UnSpillCoreRegs();
+  OpReg(kOpBx, r_RA);
 }
 
 }  // namespace art
diff --git a/src/compiler/dex/quick/mips/codegen_mips.h b/src/compiler/dex/quick/mips/codegen_mips.h
index c9d0e21..f681eda 100644
--- a/src/compiler/dex/quick/mips/codegen_mips.h
+++ b/src/compiler/dex/quick/mips/codegen_mips.h
@@ -22,64 +22,60 @@
 
 namespace art {
 
-class MipsCodegen : public Codegen {
+class MipsMir2Lir : public Mir2Lir {
   public:
+
+    MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph);
+
     // Required for target - codegen utilities.
-    virtual bool SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
-                                    RegLocation rl_src, RegLocation rl_dest, int lit);
-    virtual int LoadHelper(CompilationUnit* cu, int offset);
-    virtual LIR* LoadBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_dest,
-                              OpSize size, int s_reg);
-    virtual LIR* LoadBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_dest_lo,
-                                  int r_dest_hi, int s_reg);
-    virtual LIR* LoadBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_dest, int scale,
-                                 OpSize size);
-    virtual LIR* LoadBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
-                                     int displacement, int r_dest, int r_dest_hi, OpSize size,
-                                     int s_reg);
-    virtual LIR* LoadConstantNoClobber(CompilationUnit* cu, int r_dest, int value);
-    virtual LIR* LoadConstantWide(CompilationUnit* cu, int r_dest_lo, int r_dest_hi, int64_t value);
-    virtual LIR* StoreBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_src,
-                               OpSize size);
-    virtual LIR* StoreBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_src_lo,
-                                   int r_src_hi);
-    virtual LIR* StoreBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_src, int scale,
-                                 OpSize size);
-    virtual LIR* StoreBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
-                                      int displacement, int r_src, int r_src_hi, OpSize size,
-                                      int s_reg);
-    virtual void MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg);
+    virtual bool SmallLiteralDivide(Instruction::Code dalvik_opcode, RegLocation rl_src,
+                                    RegLocation rl_dest, int lit);
+    virtual int LoadHelper(int offset);
+    virtual LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg);
+    virtual LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
+                                  int s_reg);
+    virtual LIR* LoadBaseIndexed(int rBase, int r_index, int r_dest, int scale, OpSize size);
+    virtual LIR* LoadBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
+                                     int r_dest, int r_dest_hi, OpSize size, int s_reg);
+    virtual LIR* LoadConstantNoClobber(int r_dest, int value);
+    virtual LIR* LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value);
+    virtual LIR* StoreBaseDisp(int rBase, int displacement, int r_src, OpSize size);
+    virtual LIR* StoreBaseDispWide(int rBase, int displacement, int r_src_lo, int r_src_hi);
+    virtual LIR* StoreBaseIndexed(int rBase, int r_index, int r_src, int scale, OpSize size);
+    virtual LIR* StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
+                                      int r_src, int r_src_hi, OpSize size, int s_reg);
+    virtual void MarkGCCard(int val_reg, int tgt_addr_reg);
 
     // Required for target - register utilities.
     virtual bool IsFpReg(int reg);
     virtual bool SameRegType(int reg1, int reg2);
-    virtual int AllocTypedTemp(CompilationUnit* cu, bool fp_hint, int reg_class);
-    virtual int AllocTypedTempPair(CompilationUnit* cu, bool fp_hint, int reg_class);
+    virtual int AllocTypedTemp(bool fp_hint, int reg_class);
+    virtual int AllocTypedTempPair(bool fp_hint, int reg_class);
     virtual int S2d(int low_reg, int high_reg);
     virtual int TargetReg(SpecialTargetRegister reg);
-    virtual RegisterInfo* GetRegInfo(CompilationUnit* cu, int reg);
-    virtual RegLocation GetReturnAlt(CompilationUnit* cu);
-    virtual RegLocation GetReturnWideAlt(CompilationUnit* cu);
+    virtual RegisterInfo* GetRegInfo(int reg);
+    virtual RegLocation GetReturnAlt();
+    virtual RegLocation GetReturnWideAlt();
     virtual RegLocation LocCReturn();
     virtual RegLocation LocCReturnDouble();
     virtual RegLocation LocCReturnFloat();
     virtual RegLocation LocCReturnWide();
     virtual uint32_t FpRegMask();
-    virtual uint64_t GetRegMaskCommon(CompilationUnit* cu, int reg);
-    virtual void AdjustSpillMask(CompilationUnit* cu);
-    virtual void ClobberCalleeSave(CompilationUnit *cu);
-    virtual void FlushReg(CompilationUnit* cu, int reg);
-    virtual void FlushRegWide(CompilationUnit* cu, int reg1, int reg2);
-    virtual void FreeCallTemps(CompilationUnit* cu);
-    virtual void FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep, RegLocation rl_free);
-    virtual void LockCallTemps(CompilationUnit* cu);
-    virtual void MarkPreservedSingle(CompilationUnit* cu, int v_reg, int reg);
-    virtual void CompilerInitializeRegAlloc(CompilationUnit* cu);
+    virtual uint64_t GetRegMaskCommon(int reg);
+    virtual void AdjustSpillMask();
+    virtual void ClobberCalleeSave();
+    virtual void FlushReg(int reg);
+    virtual void FlushRegWide(int reg1, int reg2);
+    virtual void FreeCallTemps();
+    virtual void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free);
+    virtual void LockCallTemps();
+    virtual void MarkPreservedSingle(int v_reg, int reg);
+    virtual void CompilerInitializeRegAlloc();
 
     // Required for target - miscellaneous.
-    virtual AssemblerStatus AssembleInstructions(CompilationUnit* cu, uintptr_t start_addr);
+    virtual AssemblerStatus AssembleInstructions(uintptr_t start_addr);
     virtual void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
-    virtual void SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir);
+    virtual void SetupTargetResourceMasks(LIR* lir);
     virtual const char* GetTargetInstFmt(int opcode);
     virtual const char* GetTargetInstName(int opcode);
     virtual std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
@@ -89,118 +85,97 @@
     virtual bool IsUnconditionalBranch(LIR* lir);
 
     // Required for target - Dalvik-level generators.
-    virtual void GenArithImmOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+    virtual void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                    RegLocation rl_src1, RegLocation rl_src2);
-    virtual void GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array,
-                                RegLocation rl_index, RegLocation rl_src, int scale);
-    virtual void GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+    virtual void GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
+                                RegLocation rl_src, int scale);
+    virtual void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                              RegLocation rl_index, RegLocation rl_dest, int scale);
-    virtual void GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+    virtual void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                              RegLocation rl_index, RegLocation rl_src, int scale);
-    virtual void GenShiftImmOpLong(CompilationUnit* cu, Instruction::Code opcode,
-                                   RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_shift);
-    virtual void GenMulLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2);
-    virtual void GenAddLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2);
-    virtual void GenAndLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2);
-    virtual void GenArithOpDouble(CompilationUnit* cu, Instruction::Code opcode,
-                                  RegLocation rl_dest, RegLocation rl_src1,
-                                  RegLocation rl_src2);
-    virtual void GenArithOpFloat(CompilationUnit *cu, Instruction::Code opcode, RegLocation rl_dest,
+    virtual void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                                   RegLocation rl_src1, RegLocation rl_shift);
+    virtual void GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    virtual void GenAddLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    virtual void GenAndLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    virtual void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest,
+                                  RegLocation rl_src1, RegLocation rl_src2);
+    virtual void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
                                  RegLocation rl_src1, RegLocation rl_src2);
-    virtual void GenCmpFP(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
-                          RegLocation rl_src1, RegLocation rl_src2);
-    virtual void GenConversion(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
-                               RegLocation rl_src);
-    virtual bool GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier);
-    virtual bool GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min);
-    virtual bool GenInlinedSqrt(CompilationUnit* cu, CallInfo* info);
-    virtual void GenNegLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
-    virtual void GenOrLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
-                           RegLocation rl_src2);
-    virtual void GenSubLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2);
-    virtual void GenXorLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2);
-    virtual LIR* GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code, int reg1, int base,
-                                int offset, ThrowKind kind);
-    virtual RegLocation GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int reg_hi,
-                                  bool is_div);
-    virtual RegLocation GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int lit,
-                                     bool is_div);
-    virtual void GenCmpLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2);
-    virtual void GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi);
-    virtual void GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs,
-                                  RegLocation rl_method);
-    virtual void GenExitSequence(CompilationUnit* cu);
-    virtual void GenFillArrayData(CompilationUnit* cu, uint32_t table_offset,
-                                  RegLocation rl_src);
-    virtual void GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir, bool gt_bias,
-                                     bool is_double);
-    virtual void GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir);
-    virtual void GenSelect(CompilationUnit* cu, BasicBlock* bb, MIR* mir);
-    virtual void GenMemBarrier(CompilationUnit* cu, MemBarrierKind barrier_kind);
-    virtual void GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src);
-    virtual void GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src);
-    virtual void GenMoveException(CompilationUnit* cu, RegLocation rl_dest);
-    virtual void GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
-                                               RegLocation rl_result, int lit, int first_bit,
-                                               int second_bit);
-    virtual void GenNegDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
-    virtual void GenNegFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
-    virtual void GenPackedSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
-                                 RegLocation rl_src);
-    virtual void GenSparseSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
-                                 RegLocation rl_src);
-    virtual void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
-                                SpecialCaseHandler special_case);
+    virtual void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                          RegLocation rl_src2);
+    virtual void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
+    virtual bool GenInlinedCas32(CallInfo* info, bool need_write_barrier);
+    virtual bool GenInlinedMinMaxInt(CallInfo* info, bool is_min);
+    virtual bool GenInlinedSqrt(CallInfo* info);
+    virtual void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
+    virtual void GenOrLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    virtual void GenSubLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    virtual void GenXorLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    virtual LIR* GenRegMemCheck(ConditionCode c_code, int reg1, int base, int offset,
+                                ThrowKind kind);
+    virtual RegLocation GenDivRem(RegLocation rl_dest, int reg_lo, int reg_hi, bool is_div);
+    virtual RegLocation GenDivRemLit(RegLocation rl_dest, int reg_lo, int lit, bool is_div);
+    virtual void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    virtual void GenDivZeroCheck(int reg_lo, int reg_hi);
+    virtual void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
+    virtual void GenExitSequence();
+    virtual void GenFillArrayData(uint32_t table_offset, RegLocation rl_src);
+    virtual void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
+    virtual void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
+    virtual void GenSelect(BasicBlock* bb, MIR* mir);
+    virtual void GenMemBarrier(MemBarrierKind barrier_kind);
+    virtual void GenMonitorEnter(int opt_flags, RegLocation rl_src);
+    virtual void GenMonitorExit(int opt_flags, RegLocation rl_src);
+    virtual void GenMoveException(RegLocation rl_dest);
+    virtual void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
+                                               int first_bit, int second_bit);
+    virtual void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
+    virtual void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
+    virtual void GenPackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+    virtual void GenSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+    virtual void GenSpecialCase(BasicBlock* bb, MIR* mir, SpecialCaseHandler special_case);
 
     // Required for target - single operation generators.
-    virtual LIR* OpUnconditionalBranch(CompilationUnit* cu, LIR* target);
-    virtual LIR* OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1, int src2,
-                             LIR* target);
-    virtual LIR* OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg, int check_value,
-                                LIR* target);
-    virtual LIR* OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target);
-    virtual LIR* OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg,
-                                LIR* target);
-    virtual LIR* OpFpRegCopy(CompilationUnit* cu, int r_dest, int r_src);
-    virtual LIR* OpIT(CompilationUnit* cu, ConditionCode cond, const char* guide);
-    virtual LIR* OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp);
-    virtual LIR* OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target);
-    virtual LIR* OpReg(CompilationUnit* cu, OpKind op, int r_dest_src);
-    virtual LIR* OpRegCopy(CompilationUnit* cu, int r_dest, int r_src);
-    virtual LIR* OpRegCopyNoInsert(CompilationUnit* cu, int r_dest, int r_src);
-    virtual LIR* OpRegImm(CompilationUnit* cu, OpKind op, int r_dest_src1, int value);
-    virtual LIR* OpRegMem(CompilationUnit* cu, OpKind op, int r_dest, int rBase, int offset);
-    virtual LIR* OpRegReg(CompilationUnit* cu, OpKind op, int r_dest_src1, int r_src2);
-    virtual LIR* OpRegRegImm(CompilationUnit* cu, OpKind op, int r_dest, int r_src1, int value);
-    virtual LIR* OpRegRegReg(CompilationUnit* cu, OpKind op, int r_dest, int r_src1,
-                             int r_src2);
-    virtual LIR* OpTestSuspend(CompilationUnit* cu, LIR* target);
-    virtual LIR* OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset);
-    virtual LIR* OpVldm(CompilationUnit* cu, int rBase, int count);
-    virtual LIR* OpVstm(CompilationUnit* cu, int rBase, int count);
-    virtual void OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale,
-                       int offset);
-    virtual void OpRegCopyWide(CompilationUnit* cu, int dest_lo, int dest_hi, int src_lo,
-                               int src_hi);
-    virtual void OpTlsCmp(CompilationUnit* cu, int offset, int val);
+    virtual LIR* OpUnconditionalBranch(LIR* target);
+    virtual LIR* OpCmpBranch(ConditionCode cond, int src1, int src2, LIR* target);
+    virtual LIR* OpCmpImmBranch(ConditionCode cond, int reg, int check_value, LIR* target);
+    virtual LIR* OpCondBranch(ConditionCode cc, LIR* target);
+    virtual LIR* OpDecAndBranch(ConditionCode c_code, int reg, LIR* target);
+    virtual LIR* OpFpRegCopy(int r_dest, int r_src);
+    virtual LIR* OpIT(ConditionCode cond, const char* guide);
+    virtual LIR* OpMem(OpKind op, int rBase, int disp);
+    virtual LIR* OpPcRelLoad(int reg, LIR* target);
+    virtual LIR* OpReg(OpKind op, int r_dest_src);
+    virtual LIR* OpRegCopy(int r_dest, int r_src);
+    virtual LIR* OpRegCopyNoInsert(int r_dest, int r_src);
+    virtual LIR* OpRegImm(OpKind op, int r_dest_src1, int value);
+    virtual LIR* OpRegMem(OpKind op, int r_dest, int rBase, int offset);
+    virtual LIR* OpRegReg(OpKind op, int r_dest_src1, int r_src2);
+    virtual LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value);
+    virtual LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2);
+    virtual LIR* OpTestSuspend(LIR* target);
+    virtual LIR* OpThreadMem(OpKind op, int thread_offset);
+    virtual LIR* OpVldm(int rBase, int count);
+    virtual LIR* OpVstm(int rBase, int count);
+    virtual void OpLea(int rBase, int reg1, int reg2, int scale, int offset);
+    virtual void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi);
+    virtual void OpTlsCmp(int offset, int val);
 
-    LIR* LoadBaseDispBody(CompilationUnit* cu, int rBase, int displacement, int r_dest,
-                          int r_dest_hi, OpSize size, int s_reg);
-    LIR* StoreBaseDispBody(CompilationUnit* cu, int rBase, int displacement, int r_src,
-                           int r_src_hi, OpSize size);
-    void SpillCoreRegs(CompilationUnit* cu);
-    void UnSpillCoreRegs(CompilationUnit* cu);
+    LIR* LoadBaseDispBody(int rBase, int displacement, int r_dest, int r_dest_hi, OpSize size,
+                          int s_reg);
+    LIR* StoreBaseDispBody(int rBase, int displacement, int r_src, int r_src_hi, OpSize size);
+    void SpillCoreRegs();
+    void UnSpillCoreRegs();
     static const MipsEncodingMap EncodingMap[kMipsLast];
     bool InexpensiveConstantInt(int32_t value);
     bool InexpensiveConstantFloat(int32_t value);
     bool InexpensiveConstantLong(int64_t value);
     bool InexpensiveConstantDouble(int64_t value);
+
+  private:
+    void ConvertShortToLongBranch(LIR* lir);
+
 };
 
 }  // namespace art
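
For illustration, a minimal toy sketch of the signature pattern this CL applies across the target backends: the CompilationUnit* formerly threaded through every call becomes a cu_ member on the codegen object. All names here (ToyMir2Lir, disable_opt) are stand-ins, not the real ART declarations.

#include <cstdio>

struct CompilationUnit { int disable_opt; };

// Toy backend: state that used to be an explicit parameter now lives on
// the object, so method signatures shrink at every call site.
class ToyMir2Lir {
 public:
  explicit ToyMir2Lir(CompilationUnit* cu) : cu_(cu) {}
  // Before this CL: OpRegCopy(CompilationUnit* cu, int r_dest, int r_src).
  void OpRegCopy(int r_dest, int r_src) {
    std::printf("move r%d <- r%d (disable_opt=%d)\n",
                r_dest, r_src, cu_->disable_opt);
  }
 private:
  CompilationUnit* const cu_;
};

int main() {
  CompilationUnit cu = {0};
  ToyMir2Lir cg(&cu);
  cg.OpRegCopy(2, 3);  // no CompilationUnit* at the call site any more
  return 0;
}
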
diff --git a/src/compiler/dex/quick/mips/fp_mips.cc b/src/compiler/dex/quick/mips/fp_mips.cc
index 96007d8..5ddec00 100644
--- a/src/compiler/dex/quick/mips/fp_mips.cc
+++ b/src/compiler/dex/quick/mips/fp_mips.cc
@@ -15,14 +15,12 @@
  */
 
 #include "codegen_mips.h"
-#include "compiler/dex/quick/codegen_util.h"
-#include "compiler/dex/quick/ralloc_util.h"
 #include "mips_lir.h"
 #include "oat/runtime/oat_support_entrypoints.h"
 
 namespace art {
 
-void MipsCodegen::GenArithOpFloat(CompilationUnit *cu, Instruction::Code opcode,
+void MipsMir2Lir::GenArithOpFloat(Instruction::Code opcode,
                                   RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
 {
   int op = kMipsNop;
@@ -51,25 +49,25 @@
       break;
     case Instruction::REM_FLOAT_2ADDR:
     case Instruction::REM_FLOAT:
-      FlushAllRegs(cu);   // Send everything to home location
-      CallRuntimeHelperRegLocationRegLocation(cu, ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, false);
-      rl_result = GetReturn(cu, true);
-      StoreValue(cu, rl_dest, rl_result);
+      FlushAllRegs();   // Send everything to home location
+      CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, false);
+      rl_result = GetReturn(true);
+      StoreValue(rl_dest, rl_result);
       return;
     case Instruction::NEG_FLOAT:
-      GenNegFloat(cu, rl_dest, rl_src1);
+      GenNegFloat(rl_dest, rl_src1);
       return;
     default:
       LOG(FATAL) << "Unexpected opcode: " << opcode;
   }
-  rl_src1 = LoadValue(cu, rl_src1, kFPReg);
-  rl_src2 = LoadValue(cu, rl_src2, kFPReg);
-  rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
-  NewLIR3(cu, op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
-  StoreValue(cu, rl_dest, rl_result);
+  rl_src1 = LoadValue(rl_src1, kFPReg);
+  rl_src2 = LoadValue(rl_src2, kFPReg);
+  rl_result = EvalLoc(rl_dest, kFPReg, true);
+  NewLIR3(op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
+  StoreValue(rl_dest, rl_result);
 }
 
-void MipsCodegen::GenArithOpDouble(CompilationUnit *cu, Instruction::Code opcode,
+void MipsMir2Lir::GenArithOpDouble(Instruction::Code opcode,
                                    RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
 {
   int op = kMipsNop;
@@ -94,30 +92,30 @@
       break;
     case Instruction::REM_DOUBLE_2ADDR:
     case Instruction::REM_DOUBLE:
-      FlushAllRegs(cu);   // Send everything to home location
-      CallRuntimeHelperRegLocationRegLocation(cu, ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, false);
-      rl_result = GetReturnWide(cu, true);
-      StoreValueWide(cu, rl_dest, rl_result);
+      FlushAllRegs();   // Send everything to home location
+      CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, false);
+      rl_result = GetReturnWide(true);
+      StoreValueWide(rl_dest, rl_result);
       return;
     case Instruction::NEG_DOUBLE:
-      GenNegDouble(cu, rl_dest, rl_src1);
+      GenNegDouble(rl_dest, rl_src1);
       return;
     default:
       LOG(FATAL) << "Unpexpected opcode: " << opcode;
   }
-  rl_src1 = LoadValueWide(cu, rl_src1, kFPReg);
+  rl_src1 = LoadValueWide(rl_src1, kFPReg);
   DCHECK(rl_src1.wide);
-  rl_src2 = LoadValueWide(cu, rl_src2, kFPReg);
+  rl_src2 = LoadValueWide(rl_src2, kFPReg);
   DCHECK(rl_src2.wide);
-  rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+  rl_result = EvalLoc(rl_dest, kFPReg, true);
   DCHECK(rl_dest.wide);
   DCHECK(rl_result.wide);
-  NewLIR3(cu, op, S2d(rl_result.low_reg, rl_result.high_reg), S2d(rl_src1.low_reg, rl_src1.high_reg),
+  NewLIR3(op, S2d(rl_result.low_reg, rl_result.high_reg), S2d(rl_src1.low_reg, rl_src1.high_reg),
           S2d(rl_src2.low_reg, rl_src2.high_reg));
-  StoreValueWide(cu, rl_dest, rl_result);
+  StoreValueWide(rl_dest, rl_result);
 }
 
-void MipsCodegen::GenConversion(CompilationUnit *cu, Instruction::Code opcode, RegLocation rl_dest,
+void MipsMir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
                                 RegLocation rl_src)
 {
   int op = kMipsNop;
@@ -137,45 +135,45 @@
       op = kMipsFcvtdw;
       break;
     case Instruction::FLOAT_TO_INT:
-      GenConversionCall(cu, ENTRYPOINT_OFFSET(pF2iz), rl_dest, rl_src);
+      GenConversionCall(ENTRYPOINT_OFFSET(pF2iz), rl_dest, rl_src);
       return;
     case Instruction::DOUBLE_TO_INT:
-      GenConversionCall(cu, ENTRYPOINT_OFFSET(pD2iz), rl_dest, rl_src);
+      GenConversionCall(ENTRYPOINT_OFFSET(pD2iz), rl_dest, rl_src);
       return;
     case Instruction::LONG_TO_DOUBLE:
-      GenConversionCall(cu, ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src);
+      GenConversionCall(ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src);
       return;
     case Instruction::FLOAT_TO_LONG:
-      GenConversionCall(cu, ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
+      GenConversionCall(ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
       return;
     case Instruction::LONG_TO_FLOAT:
-      GenConversionCall(cu, ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src);
+      GenConversionCall(ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src);
       return;
     case Instruction::DOUBLE_TO_LONG:
-      GenConversionCall(cu, ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
+      GenConversionCall(ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
       return;
     default:
       LOG(FATAL) << "Unexpected opcode: " << opcode;
   }
   if (rl_src.wide) {
-    rl_src = LoadValueWide(cu, rl_src, kFPReg);
+    rl_src = LoadValueWide(rl_src, kFPReg);
     src_reg = S2d(rl_src.low_reg, rl_src.high_reg);
   } else {
-    rl_src = LoadValue(cu, rl_src, kFPReg);
+    rl_src = LoadValue(rl_src, kFPReg);
     src_reg = rl_src.low_reg;
   }
   if (rl_dest.wide) {
-    rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
-    NewLIR2(cu, op, S2d(rl_result.low_reg, rl_result.high_reg), src_reg);
-    StoreValueWide(cu, rl_dest, rl_result);
+    rl_result = EvalLoc(rl_dest, kFPReg, true);
+    NewLIR2(op, S2d(rl_result.low_reg, rl_result.high_reg), src_reg);
+    StoreValueWide(rl_dest, rl_result);
   } else {
-    rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
-    NewLIR2(cu, op, rl_result.low_reg, src_reg);
-    StoreValue(cu, rl_dest, rl_result);
+    rl_result = EvalLoc(rl_dest, kFPReg, true);
+    NewLIR2(op, rl_result.low_reg, src_reg);
+    StoreValue(rl_dest, rl_result);
   }
 }
 
-void MipsCodegen::GenCmpFP(CompilationUnit *cu, Instruction::Code opcode, RegLocation rl_dest,
+void MipsMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2)
 {
   bool wide = true;
@@ -199,48 +197,48 @@
     default:
       LOG(FATAL) << "Unexpected opcode: " << opcode;
   }
-  FlushAllRegs(cu);
-  LockCallTemps(cu);
+  FlushAllRegs();
+  LockCallTemps();
   if (wide) {
-    LoadValueDirectWideFixed(cu, rl_src1, rMIPS_FARG0, rMIPS_FARG1);
-    LoadValueDirectWideFixed(cu, rl_src2, rMIPS_FARG2, rMIPS_FARG3);
+    LoadValueDirectWideFixed(rl_src1, rMIPS_FARG0, rMIPS_FARG1);
+    LoadValueDirectWideFixed(rl_src2, rMIPS_FARG2, rMIPS_FARG3);
   } else {
-    LoadValueDirectFixed(cu, rl_src1, rMIPS_FARG0);
-    LoadValueDirectFixed(cu, rl_src2, rMIPS_FARG2);
+    LoadValueDirectFixed(rl_src1, rMIPS_FARG0);
+    LoadValueDirectFixed(rl_src2, rMIPS_FARG2);
   }
-  int r_tgt = LoadHelper(cu, offset);
+  int r_tgt = LoadHelper(offset);
   // NOTE: not a safepoint
-  OpReg(cu, kOpBlx, r_tgt);
-  RegLocation rl_result = GetReturn(cu, false);
-  StoreValue(cu, rl_dest, rl_result);
+  OpReg(kOpBlx, r_tgt);
+  RegLocation rl_result = GetReturn(false);
+  StoreValue(rl_dest, rl_result);
 }
 
-void MipsCodegen::GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+void MipsMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir,
                                 bool gt_bias, bool is_double)
 {
   UNIMPLEMENTED(FATAL) << "Need codegen for fused fp cmp branch";
 }
 
-void MipsCodegen::GenNegFloat(CompilationUnit *cu, RegLocation rl_dest, RegLocation rl_src)
+void MipsMir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src)
 {
   RegLocation rl_result;
-  rl_src = LoadValue(cu, rl_src, kCoreReg);
-  rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-  OpRegRegImm(cu, kOpAdd, rl_result.low_reg, rl_src.low_reg, 0x80000000);
-  StoreValue(cu, rl_dest, rl_result);
+  rl_src = LoadValue(rl_src, kCoreReg);
+  rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  OpRegRegImm(kOpAdd, rl_result.low_reg, rl_src.low_reg, 0x80000000);
+  StoreValue(rl_dest, rl_result);
 }
 
-void MipsCodegen::GenNegDouble(CompilationUnit *cu, RegLocation rl_dest, RegLocation rl_src)
+void MipsMir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src)
 {
   RegLocation rl_result;
-  rl_src = LoadValueWide(cu, rl_src, kCoreReg);
-  rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-  OpRegRegImm(cu, kOpAdd, rl_result.high_reg, rl_src.high_reg, 0x80000000);
-  OpRegCopy(cu, rl_result.low_reg, rl_src.low_reg);
-  StoreValueWide(cu, rl_dest, rl_result);
+  rl_src = LoadValueWide(rl_src, kCoreReg);
+  rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  OpRegRegImm(kOpAdd, rl_result.high_reg, rl_src.high_reg, 0x80000000);
+  OpRegCopy(rl_result.low_reg, rl_src.low_reg);
+  StoreValueWide(rl_dest, rl_result);
 }
 
-bool MipsCodegen::GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min)
+bool MipsMir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min)
 {
   // TODO: need Mips implementation
   return false;
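
The NEG_FLOAT/NEG_DOUBLE lowerings above work on the raw bits in a core register: adding 0x80000000 flips only the IEEE-754 sign bit, since the low 31 bits cannot carry into it. A self-contained restatement in plain C++, illustrative only (not ART code):

#include <cstdint>
#include <cstdio>
#include <cstring>

// The sign-bit flip behind GenNegFloat's add of 0x80000000: adding the top
// bit of a 32-bit word is equivalent to XOR-ing it, because nothing below
// bit 31 can carry into it.
static float NegateViaSignBit(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));  // view the float's raw bits
  bits += 0x80000000u;                   // flips only bit 31
  std::memcpy(&f, &bits, sizeof(f));
  return f;
}

int main() {
  std::printf("%f\n", NegateViaSignBit(1.5f));  // prints -1.500000
  return 0;
}
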
diff --git a/src/compiler/dex/quick/mips/int_mips.cc b/src/compiler/dex/quick/mips/int_mips.cc
index b1fa623..d65d08a 100644
--- a/src/compiler/dex/quick/mips/int_mips.cc
+++ b/src/compiler/dex/quick/mips/int_mips.cc
@@ -17,8 +17,6 @@
 /* This file contains codegen for the Mips ISA */
 
 #include "codegen_mips.h"
-#include "compiler/dex/quick/codegen_util.h"
-#include "compiler/dex/quick/ralloc_util.h"
 #include "mips_lir.h"
 #include "oat/runtime/oat_support_entrypoints.h"
 
@@ -40,29 +38,29 @@
  * finish:
  *
  */
-void MipsCodegen::GenCmpLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+void MipsMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2)
 {
-  rl_src1 = LoadValueWide(cu, rl_src1, kCoreReg);
-  rl_src2 = LoadValueWide(cu, rl_src2, kCoreReg);
-  int t0 = AllocTemp(cu);
-  int t1 = AllocTemp(cu);
-  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-  NewLIR3(cu, kMipsSlt, t0, rl_src1.high_reg, rl_src2.high_reg);
-  NewLIR3(cu, kMipsSlt, t1, rl_src2.high_reg, rl_src1.high_reg);
-  NewLIR3(cu, kMipsSubu, rl_result.low_reg, t1, t0);
-  LIR* branch = OpCmpImmBranch(cu, kCondNe, rl_result.low_reg, 0, NULL);
-  NewLIR3(cu, kMipsSltu, t0, rl_src1.low_reg, rl_src2.low_reg);
-  NewLIR3(cu, kMipsSltu, t1, rl_src2.low_reg, rl_src1.low_reg);
-  NewLIR3(cu, kMipsSubu, rl_result.low_reg, t1, t0);
-  FreeTemp(cu, t0);
-  FreeTemp(cu, t1);
-  LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+  int t0 = AllocTemp();
+  int t1 = AllocTemp();
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  NewLIR3(kMipsSlt, t0, rl_src1.high_reg, rl_src2.high_reg);
+  NewLIR3(kMipsSlt, t1, rl_src2.high_reg, rl_src1.high_reg);
+  NewLIR3(kMipsSubu, rl_result.low_reg, t1, t0);
+  LIR* branch = OpCmpImmBranch(kCondNe, rl_result.low_reg, 0, NULL);
+  NewLIR3(kMipsSltu, t0, rl_src1.low_reg, rl_src2.low_reg);
+  NewLIR3(kMipsSltu, t1, rl_src2.low_reg, rl_src1.low_reg);
+  NewLIR3(kMipsSubu, rl_result.low_reg, t1, t0);
+  FreeTemp(t0);
+  FreeTemp(t1);
+  LIR* target = NewLIR0(kPseudoTargetLabel);
   branch->target = target;
-  StoreValue(cu, rl_dest, rl_result);
+  StoreValue(rl_dest, rl_result);
 }
 
-LIR* MipsCodegen::OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1, int src2,
+LIR* MipsMir2Lir::OpCmpBranch(ConditionCode cond, int src1, int src2,
                               LIR* target)
 {
   LIR* branch;
@@ -115,31 +113,31 @@
       return NULL;
   }
   if (cmp_zero) {
-    branch = NewLIR2(cu, br_op, src1, src2);
+    branch = NewLIR2(br_op, src1, src2);
   } else {
-    int t_reg = AllocTemp(cu);
+    int t_reg = AllocTemp();
     if (swapped) {
-      NewLIR3(cu, slt_op, t_reg, src2, src1);
+      NewLIR3(slt_op, t_reg, src2, src1);
     } else {
-      NewLIR3(cu, slt_op, t_reg, src1, src2);
+      NewLIR3(slt_op, t_reg, src1, src2);
     }
-    branch = NewLIR1(cu, br_op, t_reg);
-    FreeTemp(cu, t_reg);
+    branch = NewLIR1(br_op, t_reg);
+    FreeTemp(t_reg);
   }
   branch->target = target;
   return branch;
 }
 
-LIR* MipsCodegen::OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg,
+LIR* MipsMir2Lir::OpCmpImmBranch(ConditionCode cond, int reg,
                                  int check_value, LIR* target)
 {
   LIR* branch;
   if (check_value != 0) {
     // TUNING: handle s16 & kCondLt/Mi case using slti
-    int t_reg = AllocTemp(cu);
-    LoadConstant(cu, t_reg, check_value);
-    branch = OpCmpBranch(cu, cond, reg, t_reg, target);
-    FreeTemp(cu, t_reg);
+    int t_reg = AllocTemp();
+    LoadConstant(t_reg, check_value);
+    branch = OpCmpBranch(cond, reg, t_reg, target);
+    FreeTemp(t_reg);
     return branch;
   }
   MipsOpCode opc;
@@ -153,37 +151,37 @@
     case kCondNe: opc = kMipsBnez; break;
     default:
       // Tuning: use slti when applicable
-      int t_reg = AllocTemp(cu);
-      LoadConstant(cu, t_reg, check_value);
-      branch = OpCmpBranch(cu, cond, reg, t_reg, target);
-      FreeTemp(cu, t_reg);
+      int t_reg = AllocTemp();
+      LoadConstant(t_reg, check_value);
+      branch = OpCmpBranch(cond, reg, t_reg, target);
+      FreeTemp(t_reg);
       return branch;
   }
-  branch = NewLIR1(cu, opc, reg);
+  branch = NewLIR1(opc, reg);
   branch->target = target;
   return branch;
 }
 
-LIR* MipsCodegen::OpRegCopyNoInsert(CompilationUnit *cu, int r_dest, int r_src)
+LIR* MipsMir2Lir::OpRegCopyNoInsert(int r_dest, int r_src)
 {
   if (MIPS_FPREG(r_dest) || MIPS_FPREG(r_src))
-    return OpFpRegCopy(cu, r_dest, r_src);
-  LIR* res = RawLIR(cu, cu->current_dalvik_offset, kMipsMove,
+    return OpFpRegCopy(r_dest, r_src);
+  LIR* res = RawLIR(current_dalvik_offset_, kMipsMove,
             r_dest, r_src);
-  if (!(cu->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
+  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
     res->flags.is_nop = true;
   }
   return res;
 }
 
-LIR* MipsCodegen::OpRegCopy(CompilationUnit *cu, int r_dest, int r_src)
+LIR* MipsMir2Lir::OpRegCopy(int r_dest, int r_src)
 {
-  LIR *res = OpRegCopyNoInsert(cu, r_dest, r_src);
-  AppendLIR(cu, res);
+  LIR *res = OpRegCopyNoInsert(r_dest, r_src);
+  AppendLIR(res);
   return res;
 }
 
-void MipsCodegen::OpRegCopyWide(CompilationUnit *cu, int dest_lo, int dest_hi, int src_lo,
+void MipsMir2Lir::OpRegCopyWide(int dest_lo, int dest_hi, int src_lo,
                                 int src_hi)
 {
   bool dest_fp = MIPS_FPREG(dest_lo) && MIPS_FPREG(dest_hi);
@@ -192,172 +190,172 @@
   assert(MIPS_FPREG(dest_lo) == MIPS_FPREG(dest_hi));
   if (dest_fp) {
     if (src_fp) {
-      OpRegCopy(cu, S2d(dest_lo, dest_hi), S2d(src_lo, src_hi));
+      OpRegCopy(S2d(dest_lo, dest_hi), S2d(src_lo, src_hi));
     } else {
        /* note the operands are swapped for the mtc1 instr */
-      NewLIR2(cu, kMipsMtc1, src_lo, dest_lo);
-      NewLIR2(cu, kMipsMtc1, src_hi, dest_hi);
+      NewLIR2(kMipsMtc1, src_lo, dest_lo);
+      NewLIR2(kMipsMtc1, src_hi, dest_hi);
     }
   } else {
     if (src_fp) {
-      NewLIR2(cu, kMipsMfc1, dest_lo, src_lo);
-      NewLIR2(cu, kMipsMfc1, dest_hi, src_hi);
+      NewLIR2(kMipsMfc1, dest_lo, src_lo);
+      NewLIR2(kMipsMfc1, dest_hi, src_hi);
     } else {
       // Handle overlap
       if (src_hi == dest_lo) {
-        OpRegCopy(cu, dest_hi, src_hi);
-        OpRegCopy(cu, dest_lo, src_lo);
+        OpRegCopy(dest_hi, src_hi);
+        OpRegCopy(dest_lo, src_lo);
       } else {
-        OpRegCopy(cu, dest_lo, src_lo);
-        OpRegCopy(cu, dest_hi, src_hi);
+        OpRegCopy(dest_lo, src_lo);
+        OpRegCopy(dest_hi, src_hi);
       }
     }
   }
 }
 
-void MipsCodegen::GenSelect(CompilationUnit* cu, BasicBlock* bb, MIR* mir)
+void MipsMir2Lir::GenSelect(BasicBlock* bb, MIR* mir)
 {
   UNIMPLEMENTED(FATAL) << "Need codegen for select";
 }
 
-void MipsCodegen::GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir)
+void MipsMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir)
 {
   UNIMPLEMENTED(FATAL) << "Need codegen for fused long cmp branch";
 }
 
-LIR* MipsCodegen::GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code,
+LIR* MipsMir2Lir::GenRegMemCheck(ConditionCode c_code,
                     int reg1, int base, int offset, ThrowKind kind)
 {
   LOG(FATAL) << "Unexpected use of GenRegMemCheck for Arm";
   return NULL;
 }
 
-RegLocation MipsCodegen::GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg1, int reg2,
+RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest, int reg1, int reg2,
                                     bool is_div)
 {
-  NewLIR4(cu, kMipsDiv, r_HI, r_LO, reg1, reg2);
-  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+  NewLIR4(kMipsDiv, r_HI, r_LO, reg1, reg2);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   if (is_div) {
-    NewLIR2(cu, kMipsMflo, rl_result.low_reg, r_LO);
+    NewLIR2(kMipsMflo, rl_result.low_reg, r_LO);
   } else {
-    NewLIR2(cu, kMipsMfhi, rl_result.low_reg, r_HI);
+    NewLIR2(kMipsMfhi, rl_result.low_reg, r_HI);
   }
   return rl_result;
 }
 
-RegLocation MipsCodegen::GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg1, int lit,
+RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, int reg1, int lit,
                                        bool is_div)
 {
-  int t_reg = AllocTemp(cu);
-  NewLIR3(cu, kMipsAddiu, t_reg, r_ZERO, lit);
-  NewLIR4(cu, kMipsDiv, r_HI, r_LO, reg1, t_reg);
-  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+  int t_reg = AllocTemp();
+  NewLIR3(kMipsAddiu, t_reg, r_ZERO, lit);
+  NewLIR4(kMipsDiv, r_HI, r_LO, reg1, t_reg);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   if (is_div) {
-    NewLIR2(cu, kMipsMflo, rl_result.low_reg, r_LO);
+    NewLIR2(kMipsMflo, rl_result.low_reg, r_LO);
   } else {
-    NewLIR2(cu, kMipsMfhi, rl_result.low_reg, r_HI);
+    NewLIR2(kMipsMfhi, rl_result.low_reg, r_HI);
   }
-  FreeTemp(cu, t_reg);
+  FreeTemp(t_reg);
   return rl_result;
 }
 
-void MipsCodegen::OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale, int offset)
+void MipsMir2Lir::OpLea(int rBase, int reg1, int reg2, int scale, int offset)
 {
   LOG(FATAL) << "Unexpected use of OpLea for Arm";
 }
 
-void MipsCodegen::OpTlsCmp(CompilationUnit* cu, int offset, int val)
+void MipsMir2Lir::OpTlsCmp(int offset, int val)
 {
   LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm";
 }
 
-bool MipsCodegen::GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier) {
-  DCHECK_NE(cu->instruction_set, kThumb2);
+bool MipsMir2Lir::GenInlinedCas32(CallInfo* info, bool need_write_barrier) {
+  DCHECK_NE(cu_->instruction_set, kThumb2);
   return false;
 }
 
-bool MipsCodegen::GenInlinedSqrt(CompilationUnit* cu, CallInfo* info) {
-  DCHECK_NE(cu->instruction_set, kThumb2);
+bool MipsMir2Lir::GenInlinedSqrt(CallInfo* info) {
+  DCHECK_NE(cu_->instruction_set, kThumb2);
   return false;
 }
 
-LIR* MipsCodegen::OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target) {
+LIR* MipsMir2Lir::OpPcRelLoad(int reg, LIR* target) {
   LOG(FATAL) << "Unexpected use of OpPcRelLoad for Mips";
   return NULL;
 }
 
-LIR* MipsCodegen::OpVldm(CompilationUnit* cu, int rBase, int count)
+LIR* MipsMir2Lir::OpVldm(int rBase, int count)
 {
   LOG(FATAL) << "Unexpected use of OpVldm for Mips";
   return NULL;
 }
 
-LIR* MipsCodegen::OpVstm(CompilationUnit* cu, int rBase, int count)
+LIR* MipsMir2Lir::OpVstm(int rBase, int count)
 {
   LOG(FATAL) << "Unexpected use of OpVstm for Mips";
   return NULL;
 }
 
-void MipsCodegen::GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
+void MipsMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                                 RegLocation rl_result, int lit,
                                                 int first_bit, int second_bit)
 {
-  int t_reg = AllocTemp(cu);
-  OpRegRegImm(cu, kOpLsl, t_reg, rl_src.low_reg, second_bit - first_bit);
-  OpRegRegReg(cu, kOpAdd, rl_result.low_reg, rl_src.low_reg, t_reg);
-  FreeTemp(cu, t_reg);
+  int t_reg = AllocTemp();
+  OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, second_bit - first_bit);
+  OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, t_reg);
+  FreeTemp(t_reg);
   if (first_bit != 0) {
-    OpRegRegImm(cu, kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
+    OpRegRegImm(kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
   }
 }
 
-void MipsCodegen::GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi)
+void MipsMir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi)
 {
-  int t_reg = AllocTemp(cu);
-  OpRegRegReg(cu, kOpOr, t_reg, reg_lo, reg_hi);
-  GenImmedCheck(cu, kCondEq, t_reg, 0, kThrowDivZero);
-  FreeTemp(cu, t_reg);
+  int t_reg = AllocTemp();
+  OpRegRegReg(kOpOr, t_reg, reg_lo, reg_hi);
+  GenImmedCheck(kCondEq, t_reg, 0, kThrowDivZero);
+  FreeTemp(t_reg);
 }
 
 // Test suspend flag, return target of taken suspend branch
-LIR* MipsCodegen::OpTestSuspend(CompilationUnit* cu, LIR* target)
+LIR* MipsMir2Lir::OpTestSuspend(LIR* target)
 {
-  OpRegImm(cu, kOpSub, rMIPS_SUSPEND, 1);
-  return OpCmpImmBranch(cu, (target == NULL) ? kCondEq : kCondNe, rMIPS_SUSPEND, 0, target);
+  OpRegImm(kOpSub, rMIPS_SUSPEND, 1);
+  return OpCmpImmBranch((target == NULL) ? kCondEq : kCondNe, rMIPS_SUSPEND, 0, target);
 }
 
 // Decrement register and branch on condition
-LIR* MipsCodegen::OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg, LIR* target)
+LIR* MipsMir2Lir::OpDecAndBranch(ConditionCode c_code, int reg, LIR* target)
 {
-  OpRegImm(cu, kOpSub, reg, 1);
-  return OpCmpImmBranch(cu, c_code, reg, 0, target);
+  OpRegImm(kOpSub, reg, 1);
+  return OpCmpImmBranch(c_code, reg, 0, target);
 }
 
-bool MipsCodegen::SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
+bool MipsMir2Lir::SmallLiteralDivide(Instruction::Code dalvik_opcode,
                                      RegLocation rl_src, RegLocation rl_dest, int lit)
 {
   LOG(FATAL) << "Unexpected use of smallLiteralDive in Mips";
   return false;
 }
 
-LIR* MipsCodegen::OpIT(CompilationUnit* cu, ConditionCode cond, const char* guide)
+LIR* MipsMir2Lir::OpIT(ConditionCode cond, const char* guide)
 {
   LOG(FATAL) << "Unexpected use of OpIT in Mips";
   return NULL;
 }
 
-void MipsCodegen::GenMulLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+void MipsMir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2)
 {
   LOG(FATAL) << "Unexpected use of GenMulLong for Mips";
 }
 
-void MipsCodegen::GenAddLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+void MipsMir2Lir::GenAddLong(RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2)
 {
-  rl_src1 = LoadValueWide(cu, rl_src1, kCoreReg);
-  rl_src2 = LoadValueWide(cu, rl_src2, kCoreReg);
-  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   /*
    *  [v1 v0] =  [a1 a0] + [a3 a2];
    *  addu v0,a2,a0
@@ -366,21 +364,21 @@
    *  addu v1,v1,t1
    */
 
-  OpRegRegReg(cu, kOpAdd, rl_result.low_reg, rl_src2.low_reg, rl_src1.low_reg);
-  int t_reg = AllocTemp(cu);
-  OpRegRegReg(cu, kOpAdd, t_reg, rl_src2.high_reg, rl_src1.high_reg);
-  NewLIR3(cu, kMipsSltu, rl_result.high_reg, rl_result.low_reg, rl_src2.low_reg);
-  OpRegRegReg(cu, kOpAdd, rl_result.high_reg, rl_result.high_reg, t_reg);
-  FreeTemp(cu, t_reg);
-  StoreValueWide(cu, rl_dest, rl_result);
+  OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src2.low_reg, rl_src1.low_reg);
+  int t_reg = AllocTemp();
+  OpRegRegReg(kOpAdd, t_reg, rl_src2.high_reg, rl_src1.high_reg);
+  NewLIR3(kMipsSltu, rl_result.high_reg, rl_result.low_reg, rl_src2.low_reg);
+  OpRegRegReg(kOpAdd, rl_result.high_reg, rl_result.high_reg, t_reg);
+  FreeTemp(t_reg);
+  StoreValueWide(rl_dest, rl_result);
 }
 
-void MipsCodegen::GenSubLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+void MipsMir2Lir::GenSubLong(RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2)
 {
-  rl_src1 = LoadValueWide(cu, rl_src1, kCoreReg);
-  rl_src2 = LoadValueWide(cu, rl_src2, kCoreReg);
-  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   /*
    *  [v1 v0] =  [a1 a0] - [a3 a2];
    *  sltu  t1,a0,a2
@@ -389,19 +387,19 @@
    *  subu  v1,v1,t1
    */
 
-  int t_reg = AllocTemp(cu);
-  NewLIR3(cu, kMipsSltu, t_reg, rl_src1.low_reg, rl_src2.low_reg);
-  OpRegRegReg(cu, kOpSub, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
-  OpRegRegReg(cu, kOpSub, rl_result.high_reg, rl_src1.high_reg, rl_src2.high_reg);
-  OpRegRegReg(cu, kOpSub, rl_result.high_reg, rl_result.high_reg, t_reg);
-  FreeTemp(cu, t_reg);
-  StoreValueWide(cu, rl_dest, rl_result);
+  int t_reg = AllocTemp();
+  NewLIR3(kMipsSltu, t_reg, rl_src1.low_reg, rl_src2.low_reg);
+  OpRegRegReg(kOpSub, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
+  OpRegRegReg(kOpSub, rl_result.high_reg, rl_src1.high_reg, rl_src2.high_reg);
+  OpRegRegReg(kOpSub, rl_result.high_reg, rl_result.high_reg, t_reg);
+  FreeTemp(t_reg);
+  StoreValueWide(rl_dest, rl_result);
 }
 
-void MipsCodegen::GenNegLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+void MipsMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src)
 {
-  rl_src = LoadValueWide(cu, rl_src, kCoreReg);
-  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+  rl_src = LoadValueWide(rl_src, kCoreReg);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   /*
    *  [v1 v0] =  -[a1 a0]
    *  negu  v0,a0
@@ -410,28 +408,28 @@
    *  subu  v1,v1,t1
    */
 
-  OpRegReg(cu, kOpNeg, rl_result.low_reg, rl_src.low_reg);
-  OpRegReg(cu, kOpNeg, rl_result.high_reg, rl_src.high_reg);
-  int t_reg = AllocTemp(cu);
-  NewLIR3(cu, kMipsSltu, t_reg, r_ZERO, rl_result.low_reg);
-  OpRegRegReg(cu, kOpSub, rl_result.high_reg, rl_result.high_reg, t_reg);
-  FreeTemp(cu, t_reg);
-  StoreValueWide(cu, rl_dest, rl_result);
+  OpRegReg(kOpNeg, rl_result.low_reg, rl_src.low_reg);
+  OpRegReg(kOpNeg, rl_result.high_reg, rl_src.high_reg);
+  int t_reg = AllocTemp();
+  NewLIR3(kMipsSltu, t_reg, r_ZERO, rl_result.low_reg);
+  OpRegRegReg(kOpSub, rl_result.high_reg, rl_result.high_reg, t_reg);
+  FreeTemp(t_reg);
+  StoreValueWide(rl_dest, rl_result);
 }
 
-void MipsCodegen::GenAndLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+void MipsMir2Lir::GenAndLong(RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2)
 {
   LOG(FATAL) << "Unexpected use of GenAndLong for Mips";
 }
 
-void MipsCodegen::GenOrLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+void MipsMir2Lir::GenOrLong(RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2)
 {
   LOG(FATAL) << "Unexpected use of GenOrLong for Mips";
 }
 
-void MipsCodegen::GenXorLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+void MipsMir2Lir::GenXorLong(RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2)
 {
   LOG(FATAL) << "Unexpected use of GenXorLong for Mips";
@@ -440,15 +438,15 @@
 /*
  * Generate array load
  */
-void MipsCodegen::GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                           RegLocation rl_index, RegLocation rl_dest, int scale)
 {
   RegisterClass reg_class = oat_reg_class_by_size(size);
   int len_offset = mirror::Array::LengthOffset().Int32Value();
   int data_offset;
   RegLocation rl_result;
-  rl_array = LoadValue(cu, rl_array, kCoreReg);
-  rl_index = LoadValue(cu, rl_index, kCoreReg);
+  rl_array = LoadValue(rl_array, kCoreReg);
+  rl_index = LoadValue(rl_index, kCoreReg);
 
   if (size == kLong || size == kDouble) {
     data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
@@ -457,54 +455,54 @@
   }
 
   /* null object? */
-  GenNullCheck(cu, rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+  GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
 
-  int reg_ptr = AllocTemp(cu);
+  int reg_ptr = AllocTemp();
   bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
   int reg_len = INVALID_REG;
   if (needs_range_check) {
-    reg_len = AllocTemp(cu);
+    reg_len = AllocTemp();
     /* Get len */
-    LoadWordDisp(cu, rl_array.low_reg, len_offset, reg_len);
+    LoadWordDisp(rl_array.low_reg, len_offset, reg_len);
   }
   /* reg_ptr -> array data */
-  OpRegRegImm(cu, kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
-  FreeTemp(cu, rl_array.low_reg);
+  OpRegRegImm(kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
+  FreeTemp(rl_array.low_reg);
   if ((size == kLong) || (size == kDouble)) {
     if (scale) {
-      int r_new_index = AllocTemp(cu);
-      OpRegRegImm(cu, kOpLsl, r_new_index, rl_index.low_reg, scale);
-      OpRegReg(cu, kOpAdd, reg_ptr, r_new_index);
-      FreeTemp(cu, r_new_index);
+      int r_new_index = AllocTemp();
+      OpRegRegImm(kOpLsl, r_new_index, rl_index.low_reg, scale);
+      OpRegReg(kOpAdd, reg_ptr, r_new_index);
+      FreeTemp(r_new_index);
     } else {
-      OpRegReg(cu, kOpAdd, reg_ptr, rl_index.low_reg);
+      OpRegReg(kOpAdd, reg_ptr, rl_index.low_reg);
     }
-    FreeTemp(cu, rl_index.low_reg);
-    rl_result = EvalLoc(cu, rl_dest, reg_class, true);
+    FreeTemp(rl_index.low_reg);
+    rl_result = EvalLoc(rl_dest, reg_class, true);
 
     if (needs_range_check) {
       // TODO: change kCondCS to a more meaningful name, is the sense of
       // carry-set/clear flipped?
-      GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
-      FreeTemp(cu, reg_len);
+      GenRegRegCheck(kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+      FreeTemp(reg_len);
     }
-    LoadBaseDispWide(cu, reg_ptr, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
+    LoadBaseDispWide(reg_ptr, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
 
-    FreeTemp(cu, reg_ptr);
-    StoreValueWide(cu, rl_dest, rl_result);
+    FreeTemp(reg_ptr);
+    StoreValueWide(rl_dest, rl_result);
   } else {
-    rl_result = EvalLoc(cu, rl_dest, reg_class, true);
+    rl_result = EvalLoc(rl_dest, reg_class, true);
 
     if (needs_range_check) {
       // TODO: change kCondCS to a more meaningful name, is the sense of
       // carry-set/clear flipped?
-      GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
-      FreeTemp(cu, reg_len);
+      GenRegRegCheck(kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+      FreeTemp(reg_len);
     }
-    LoadBaseIndexed(cu, reg_ptr, rl_index.low_reg, rl_result.low_reg, scale, size);
+    LoadBaseIndexed(reg_ptr, rl_index.low_reg, rl_result.low_reg, scale, size);
 
-    FreeTemp(cu, reg_ptr);
-    StoreValue(cu, rl_dest, rl_result);
+    FreeTemp(reg_ptr);
+    StoreValue(rl_dest, rl_result);
   }
 }
 
@@ -512,7 +510,7 @@
  * Generate array store
  *
  */
-void MipsCodegen::GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                           RegLocation rl_index, RegLocation rl_src, int scale)
 {
   RegisterClass reg_class = oat_reg_class_by_size(size);
@@ -525,58 +523,58 @@
     data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
   }
 
-  rl_array = LoadValue(cu, rl_array, kCoreReg);
-  rl_index = LoadValue(cu, rl_index, kCoreReg);
+  rl_array = LoadValue(rl_array, kCoreReg);
+  rl_index = LoadValue(rl_index, kCoreReg);
   int reg_ptr = INVALID_REG;
-  if (IsTemp(cu, rl_array.low_reg)) {
-    Clobber(cu, rl_array.low_reg);
+  if (IsTemp(rl_array.low_reg)) {
+    Clobber(rl_array.low_reg);
     reg_ptr = rl_array.low_reg;
   } else {
-    reg_ptr = AllocTemp(cu);
-    OpRegCopy(cu, reg_ptr, rl_array.low_reg);
+    reg_ptr = AllocTemp();
+    OpRegCopy(reg_ptr, rl_array.low_reg);
   }
 
   /* null object? */
-  GenNullCheck(cu, rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+  GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
 
   bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
   int reg_len = INVALID_REG;
   if (needs_range_check) {
-    reg_len = AllocTemp(cu);
+    reg_len = AllocTemp();
     //NOTE: max live temps(4) here.
     /* Get len */
-    LoadWordDisp(cu, rl_array.low_reg, len_offset, reg_len);
+    LoadWordDisp(rl_array.low_reg, len_offset, reg_len);
   }
   /* reg_ptr -> array data */
-  OpRegImm(cu, kOpAdd, reg_ptr, data_offset);
+  OpRegImm(kOpAdd, reg_ptr, data_offset);
   /* at this point, reg_ptr points to array, 2 live temps */
   if ((size == kLong) || (size == kDouble)) {
     //TUNING: specific wide routine that can handle fp regs
     if (scale) {
-      int r_new_index = AllocTemp(cu);
-      OpRegRegImm(cu, kOpLsl, r_new_index, rl_index.low_reg, scale);
-      OpRegReg(cu, kOpAdd, reg_ptr, r_new_index);
-      FreeTemp(cu, r_new_index);
+      int r_new_index = AllocTemp();
+      OpRegRegImm(kOpLsl, r_new_index, rl_index.low_reg, scale);
+      OpRegReg(kOpAdd, reg_ptr, r_new_index);
+      FreeTemp(r_new_index);
     } else {
-      OpRegReg(cu, kOpAdd, reg_ptr, rl_index.low_reg);
+      OpRegReg(kOpAdd, reg_ptr, rl_index.low_reg);
     }
-    rl_src = LoadValueWide(cu, rl_src, reg_class);
+    rl_src = LoadValueWide(rl_src, reg_class);
 
     if (needs_range_check) {
-      GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
-      FreeTemp(cu, reg_len);
+      GenRegRegCheck(kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+      FreeTemp(reg_len);
     }
 
-    StoreBaseDispWide(cu, reg_ptr, 0, rl_src.low_reg, rl_src.high_reg);
+    StoreBaseDispWide(reg_ptr, 0, rl_src.low_reg, rl_src.high_reg);
 
-    FreeTemp(cu, reg_ptr);
+    FreeTemp(reg_ptr);
   } else {
-    rl_src = LoadValue(cu, rl_src, reg_class);
+    rl_src = LoadValue(rl_src, reg_class);
     if (needs_range_check) {
-      GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
-      FreeTemp(cu, reg_len);
+      GenRegRegCheck(kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+      FreeTemp(reg_len);
     }
-    StoreBaseIndexed(cu, reg_ptr, rl_index.low_reg, rl_src.low_reg,
+    StoreBaseIndexed(reg_ptr, rl_index.low_reg, rl_src.low_reg,
                      scale, size);
   }
 }
@@ -585,75 +583,75 @@
  * Generate array store
  *
  */
-void MipsCodegen::GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array,
+void MipsMir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array,
                              RegLocation rl_index, RegLocation rl_src, int scale)
 {
   int len_offset = mirror::Array::LengthOffset().Int32Value();
   int data_offset = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value();
 
-  FlushAllRegs(cu);  // Use explicit registers
-  LockCallTemps(cu);
+  FlushAllRegs();  // Use explicit registers
+  LockCallTemps();
 
   int r_value = TargetReg(kArg0);  // Register holding value
   int r_array_class = TargetReg(kArg1);  // Register holding array's Class
   int r_array = TargetReg(kArg2);  // Register holding array
   int r_index = TargetReg(kArg3);  // Register holding index into array
 
-  LoadValueDirectFixed(cu, rl_array, r_array);  // Grab array
-  LoadValueDirectFixed(cu, rl_src, r_value);  // Grab value
-  LoadValueDirectFixed(cu, rl_index, r_index);  // Grab index
+  LoadValueDirectFixed(rl_array, r_array);  // Grab array
+  LoadValueDirectFixed(rl_src, r_value);  // Grab value
+  LoadValueDirectFixed(rl_index, r_index);  // Grab index
 
-  GenNullCheck(cu, rl_array.s_reg_low, r_array, opt_flags);  // NPE?
+  GenNullCheck(rl_array.s_reg_low, r_array, opt_flags);  // NPE?
 
   // Store of null?
-  LIR* null_value_check = OpCmpImmBranch(cu, kCondEq, r_value, 0, NULL);
+  LIR* null_value_check = OpCmpImmBranch(kCondEq, r_value, 0, NULL);
 
   // Get the array's class.
-  LoadWordDisp(cu, r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
-  CallRuntimeHelperRegReg(cu, ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
+  LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
+  CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
                           r_array_class, true);
   // Redo LoadValues in case they didn't survive the call.
-  LoadValueDirectFixed(cu, rl_array, r_array);  // Reload array
-  LoadValueDirectFixed(cu, rl_index, r_index);  // Reload index
-  LoadValueDirectFixed(cu, rl_src, r_value);  // Reload value
+  LoadValueDirectFixed(rl_array, r_array);  // Reload array
+  LoadValueDirectFixed(rl_index, r_index);  // Reload index
+  LoadValueDirectFixed(rl_src, r_value);  // Reload value
   r_array_class = INVALID_REG;
 
   // Branch here if value to be stored == null
-  LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+  LIR* target = NewLIR0(kPseudoTargetLabel);
   null_value_check->target = target;
 
   bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
   int reg_len = INVALID_REG;
   if (needs_range_check) {
     reg_len = TargetReg(kArg1);
-    LoadWordDisp(cu, r_array, len_offset, reg_len);  // Get len
+    LoadWordDisp(r_array, len_offset, reg_len);  // Get len
   }
   /* r_ptr -> array data */
-  int r_ptr = AllocTemp(cu);
-  OpRegRegImm(cu, kOpAdd, r_ptr, r_array, data_offset);
+  int r_ptr = AllocTemp();
+  OpRegRegImm(kOpAdd, r_ptr, r_array, data_offset);
   if (needs_range_check) {
-    GenRegRegCheck(cu, kCondCs, r_index, reg_len, kThrowArrayBounds);
+    GenRegRegCheck(kCondCs, r_index, reg_len, kThrowArrayBounds);
   }
-  StoreBaseIndexed(cu, r_ptr, r_index, r_value, scale, kWord);
-  FreeTemp(cu, r_ptr);
-  FreeTemp(cu, r_index);
-  if (!cu->mir_graph->IsConstantNullRef(rl_src)) {
-    MarkGCCard(cu, r_value, r_array);
+  StoreBaseIndexed(r_ptr, r_index, r_value, scale, kWord);
+  FreeTemp(r_ptr);
+  FreeTemp(r_index);
+  if (!mir_graph_->IsConstantNullRef(rl_src)) {
+    MarkGCCard(r_value, r_array);
   }
 }
 
-void MipsCodegen::GenShiftImmOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+void MipsMir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                     RegLocation rl_src1, RegLocation rl_shift)
 {
   // Default implementation is just to ignore the constant case.
-  GenShiftOpLong(cu, opcode, rl_dest, rl_src1, rl_shift);
+  GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift);
 }
 
-void MipsCodegen::GenArithImmOpLong(CompilationUnit* cu, Instruction::Code opcode,
+void MipsMir2Lir::GenArithImmOpLong(Instruction::Code opcode,
                                     RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
 {
   // Default - bail to non-const handler.
-  GenArithOpLong(cu, opcode, rl_dest, rl_src1, rl_src2);
+  GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
 }
 
 }  // namespace art
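
The GenAddLong sequence above computes the 64-bit sum's carry with sltu: if the low-word result is unsigned-less-than one of its operands, the addition wrapped. A minimal C++ restatement of that lowering, illustrative only:

#include <cstdint>

// Mirrors the MIPS sequence emitted by GenAddLong:
//   addu v0,a2,a0 ; addu t1,a3,a1 ; sltu v1,v0,a2 ; addu v1,v1,t1
static void AddLong(uint32_t a_lo, uint32_t a_hi,
                    uint32_t b_lo, uint32_t b_hi,
                    uint32_t* v_lo, uint32_t* v_hi) {
  *v_lo = a_lo + b_lo;              // addu v0,a2,a0 (wraps mod 2^32)
  uint32_t t = a_hi + b_hi;         // addu t1,a3,a1
  uint32_t carry = (*v_lo < b_lo);  // sltu v1,v0,a2 -- carry out of low word
  *v_hi = t + carry;                // addu v1,v1,t1
}
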
diff --git a/src/compiler/dex/quick/mips/target_mips.cc b/src/compiler/dex/quick/mips/target_mips.cc
index 85e8a9b..8d342af 100644
--- a/src/compiler/dex/quick/mips/target_mips.cc
+++ b/src/compiler/dex/quick/mips/target_mips.cc
@@ -15,9 +15,7 @@
  */
 
 #include "codegen_mips.h"
-#include "compiler/dex/quick/codegen_util.h"
 #include "compiler/dex/compiler_internals.h"
-#include "compiler/dex/quick/ralloc_util.h"
 #include "mips_lir.h"
 
 #include <string>
@@ -37,32 +35,32 @@
 static int fp_temps[] = {r_F0, r_F1, r_F2, r_F3, r_F4, r_F5, r_F6, r_F7,
                          r_F8, r_F9, r_F10, r_F11, r_F12, r_F13, r_F14, r_F15};
 
-RegLocation MipsCodegen::LocCReturn()
+RegLocation MipsMir2Lir::LocCReturn()
 {
   RegLocation res = MIPS_LOC_C_RETURN;
   return res;
 }
 
-RegLocation MipsCodegen::LocCReturnWide()
+RegLocation MipsMir2Lir::LocCReturnWide()
 {
   RegLocation res = MIPS_LOC_C_RETURN_WIDE;
   return res;
 }
 
-RegLocation MipsCodegen::LocCReturnFloat()
+RegLocation MipsMir2Lir::LocCReturnFloat()
 {
   RegLocation res = MIPS_LOC_C_RETURN_FLOAT;
   return res;
 }
 
-RegLocation MipsCodegen::LocCReturnDouble()
+RegLocation MipsMir2Lir::LocCReturnDouble()
 {
   RegLocation res = MIPS_LOC_C_RETURN_DOUBLE;
   return res;
 }
 
 // Return a target-dependent special register.
-int MipsCodegen::TargetReg(SpecialTargetRegister reg) {
+int MipsMir2Lir::TargetReg(SpecialTargetRegister reg) {
   int res = INVALID_REG;
   switch (reg) {
     case kSelf: res = rMIPS_SELF; break;
@@ -87,19 +85,19 @@
 }
 
 // Create a double from a pair of singles.
-int MipsCodegen::S2d(int low_reg, int high_reg)
+int MipsMir2Lir::S2d(int low_reg, int high_reg)
 {
   return MIPS_S2D(low_reg, high_reg);
 }
 
 // Return mask to strip off fp reg flags and bias.
-uint32_t MipsCodegen::FpRegMask()
+uint32_t MipsMir2Lir::FpRegMask()
 {
   return MIPS_FP_REG_MASK;
 }
 
 // True if both regs single, both core or both double.
-bool MipsCodegen::SameRegType(int reg1, int reg2)
+bool MipsMir2Lir::SameRegType(int reg1, int reg2)
 {
   return (MIPS_REGTYPE(reg1) == MIPS_REGTYPE(reg2));
 }
@@ -107,7 +105,7 @@
 /*
  * Decode the register id.
  */
-uint64_t MipsCodegen::GetRegMaskCommon(CompilationUnit* cu, int reg)
+uint64_t MipsMir2Lir::GetRegMaskCommon(int reg)
 {
   uint64_t seed;
   int shift;
@@ -124,18 +122,18 @@
   return (seed << shift);
 }
 
-uint64_t MipsCodegen::GetPCUseDefEncoding()
+uint64_t MipsMir2Lir::GetPCUseDefEncoding()
 {
   return ENCODE_MIPS_REG_PC;
 }
 
 
-void MipsCodegen::SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir)
+void MipsMir2Lir::SetupTargetResourceMasks(LIR* lir)
 {
-  DCHECK_EQ(cu->instruction_set, kMips);
+  DCHECK_EQ(cu_->instruction_set, kMips);
 
   // Mips-specific resource map setup here.
-  uint64_t flags = MipsCodegen::EncodingMap[lir->opcode].flags;
+  uint64_t flags = MipsMir2Lir::EncodingMap[lir->opcode].flags;
 
   if (flags & REG_DEF_SP) {
     lir->def_mask |= ENCODE_MIPS_REG_SP;
@@ -163,7 +161,7 @@
  * Interpret a format string and build a string no longer than size
  * See format key in Assemble.c.
  */
-std::string MipsCodegen::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr)
+std::string MipsMir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr)
 {
   std::string buf;
   int i;
@@ -256,7 +254,7 @@
 }
 
 // FIXME: need to redo resource maps for MIPS - fix this at that time
-void MipsCodegen::DumpResourceMask(LIR *mips_lir, uint64_t mask, const char *prefix)
+void MipsMir2Lir::DumpResourceMask(LIR *mips_lir, uint64_t mask, const char *prefix)
 {
   char buf[256];
   buf[0] = 0;
@@ -307,10 +305,10 @@
  * machinery is in place, always spill lr.
  */
 
-void MipsCodegen::AdjustSpillMask(CompilationUnit* cu)
+void MipsMir2Lir::AdjustSpillMask()
 {
-  cu->core_spill_mask |= (1 << r_RA);
-  cu->num_core_spills++;
+  core_spill_mask_ |= (1 << r_RA);
+  num_core_spills_++;
 }
 
 /*
@@ -319,15 +317,15 @@
  * include any holes in the mask.  Associate holes with
  * Dalvik register INVALID_VREG (0xFFFFU).
  */
-void MipsCodegen::MarkPreservedSingle(CompilationUnit* cu, int s_reg, int reg)
+void MipsMir2Lir::MarkPreservedSingle(int s_reg, int reg)
 {
   LOG(FATAL) << "No support yet for promoted FP regs";
 }
 
-void MipsCodegen::FlushRegWide(CompilationUnit* cu, int reg1, int reg2)
+void MipsMir2Lir::FlushRegWide(int reg1, int reg2)
 {
-  RegisterInfo* info1 = GetRegInfo(cu, reg1);
-  RegisterInfo* info2 = GetRegInfo(cu, reg2);
+  RegisterInfo* info1 = GetRegInfo(reg1);
+  RegisterInfo* info2 = GetRegInfo(reg2);
   DCHECK(info1 && info2 && info1->pair && info2->pair &&
          (info1->partner == info2->reg) &&
          (info2->partner == info1->reg));
@@ -339,114 +337,114 @@
 
     info1->dirty = false;
     info2->dirty = false;
-    if (cu->mir_graph->SRegToVReg(info2->s_reg) < cu->mir_graph->SRegToVReg(info1->s_reg))
+    if (mir_graph_->SRegToVReg(info2->s_reg) < mir_graph_->SRegToVReg(info1->s_reg))
       info1 = info2;
-    int v_reg = cu->mir_graph->SRegToVReg(info1->s_reg);
-    StoreBaseDispWide(cu, rMIPS_SP, VRegOffset(cu, v_reg), info1->reg, info1->partner);
+    int v_reg = mir_graph_->SRegToVReg(info1->s_reg);
+    StoreBaseDispWide(rMIPS_SP, VRegOffset(v_reg), info1->reg, info1->partner);
   }
 }
 
-void MipsCodegen::FlushReg(CompilationUnit* cu, int reg)
+void MipsMir2Lir::FlushReg(int reg)
 {
-  RegisterInfo* info = GetRegInfo(cu, reg);
+  RegisterInfo* info = GetRegInfo(reg);
   if (info->live && info->dirty) {
     info->dirty = false;
-    int v_reg = cu->mir_graph->SRegToVReg(info->s_reg);
-    StoreBaseDisp(cu, rMIPS_SP, VRegOffset(cu, v_reg), reg, kWord);
+    int v_reg = mir_graph_->SRegToVReg(info->s_reg);
+    StoreBaseDisp(rMIPS_SP, VRegOffset(v_reg), reg, kWord);
   }
 }
 
 /* Give access to the target-dependent FP register encoding to common code */
-bool MipsCodegen::IsFpReg(int reg) {
+bool MipsMir2Lir::IsFpReg(int reg) {
   return MIPS_FPREG(reg);
 }
 
 /* Clobber all regs that might be used by an external C call */
-void MipsCodegen::ClobberCalleeSave(CompilationUnit *cu)
+void MipsMir2Lir::ClobberCalleeSave()
 {
-  Clobber(cu, r_ZERO);
-  Clobber(cu, r_AT);
-  Clobber(cu, r_V0);
-  Clobber(cu, r_V1);
-  Clobber(cu, r_A0);
-  Clobber(cu, r_A1);
-  Clobber(cu, r_A2);
-  Clobber(cu, r_A3);
-  Clobber(cu, r_T0);
-  Clobber(cu, r_T1);
-  Clobber(cu, r_T2);
-  Clobber(cu, r_T3);
-  Clobber(cu, r_T4);
-  Clobber(cu, r_T5);
-  Clobber(cu, r_T6);
-  Clobber(cu, r_T7);
-  Clobber(cu, r_T8);
-  Clobber(cu, r_T9);
-  Clobber(cu, r_K0);
-  Clobber(cu, r_K1);
-  Clobber(cu, r_GP);
-  Clobber(cu, r_FP);
-  Clobber(cu, r_RA);
-  Clobber(cu, r_F0);
-  Clobber(cu, r_F1);
-  Clobber(cu, r_F2);
-  Clobber(cu, r_F3);
-  Clobber(cu, r_F4);
-  Clobber(cu, r_F5);
-  Clobber(cu, r_F6);
-  Clobber(cu, r_F7);
-  Clobber(cu, r_F8);
-  Clobber(cu, r_F9);
-  Clobber(cu, r_F10);
-  Clobber(cu, r_F11);
-  Clobber(cu, r_F12);
-  Clobber(cu, r_F13);
-  Clobber(cu, r_F14);
-  Clobber(cu, r_F15);
+  Clobber(r_ZERO);
+  Clobber(r_AT);
+  Clobber(r_V0);
+  Clobber(r_V1);
+  Clobber(r_A0);
+  Clobber(r_A1);
+  Clobber(r_A2);
+  Clobber(r_A3);
+  Clobber(r_T0);
+  Clobber(r_T1);
+  Clobber(r_T2);
+  Clobber(r_T3);
+  Clobber(r_T4);
+  Clobber(r_T5);
+  Clobber(r_T6);
+  Clobber(r_T7);
+  Clobber(r_T8);
+  Clobber(r_T9);
+  Clobber(r_K0);
+  Clobber(r_K1);
+  Clobber(r_GP);
+  Clobber(r_FP);
+  Clobber(r_RA);
+  Clobber(r_F0);
+  Clobber(r_F1);
+  Clobber(r_F2);
+  Clobber(r_F3);
+  Clobber(r_F4);
+  Clobber(r_F5);
+  Clobber(r_F6);
+  Clobber(r_F7);
+  Clobber(r_F8);
+  Clobber(r_F9);
+  Clobber(r_F10);
+  Clobber(r_F11);
+  Clobber(r_F12);
+  Clobber(r_F13);
+  Clobber(r_F14);
+  Clobber(r_F15);
 }
 
-RegLocation MipsCodegen::GetReturnWideAlt(CompilationUnit* cu)
+RegLocation MipsMir2Lir::GetReturnWideAlt()
 {
   UNIMPLEMENTED(FATAL) << "No GetReturnWideAlt for MIPS";
   RegLocation res = LocCReturnWide();
   return res;
 }
 
-RegLocation MipsCodegen::GetReturnAlt(CompilationUnit* cu)
+RegLocation MipsMir2Lir::GetReturnAlt()
 {
   UNIMPLEMENTED(FATAL) << "No GetReturnAlt for MIPS";
   RegLocation res = LocCReturn();
   return res;
 }
 
-RegisterInfo* MipsCodegen::GetRegInfo(CompilationUnit* cu, int reg)
+MipsMir2Lir::RegisterInfo* MipsMir2Lir::GetRegInfo(int reg)
 {
-  return MIPS_FPREG(reg) ? &cu->reg_pool->FPRegs[reg & MIPS_FP_REG_MASK]
-            : &cu->reg_pool->core_regs[reg];
+  return MIPS_FPREG(reg) ? &reg_pool_->FPRegs[reg & MIPS_FP_REG_MASK]
+            : &reg_pool_->core_regs[reg];
 }
 
 /* To be used when explicitly managing register use */
-void MipsCodegen::LockCallTemps(CompilationUnit* cu)
+void MipsMir2Lir::LockCallTemps()
 {
-  LockTemp(cu, rMIPS_ARG0);
-  LockTemp(cu, rMIPS_ARG1);
-  LockTemp(cu, rMIPS_ARG2);
-  LockTemp(cu, rMIPS_ARG3);
+  LockTemp(rMIPS_ARG0);
+  LockTemp(rMIPS_ARG1);
+  LockTemp(rMIPS_ARG2);
+  LockTemp(rMIPS_ARG3);
 }
 
 /* To be used when explicitly managing register use */
-void MipsCodegen::FreeCallTemps(CompilationUnit* cu)
+void MipsMir2Lir::FreeCallTemps()
 {
-  FreeTemp(cu, rMIPS_ARG0);
-  FreeTemp(cu, rMIPS_ARG1);
-  FreeTemp(cu, rMIPS_ARG2);
-  FreeTemp(cu, rMIPS_ARG3);
+  FreeTemp(rMIPS_ARG0);
+  FreeTemp(rMIPS_ARG1);
+  FreeTemp(rMIPS_ARG2);
+  FreeTemp(rMIPS_ARG3);
 }
 
-void MipsCodegen::GenMemBarrier(CompilationUnit *cu, MemBarrierKind barrier_kind)
+void MipsMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind)
 {
 #if ANDROID_SMP != 0
-  NewLIR1(cu, kMipsSync, 0 /* Only stype currently supported */);
+  NewLIR1(kMipsSync, 0 /* Only stype currently supported */);
 #endif
 }
 
@@ -454,7 +452,7 @@
  * Alloc a pair of core registers, or a double.  Low reg in low byte,
  * high reg in next byte.
  */
-int MipsCodegen::AllocTypedTempPair(CompilationUnit *cu, bool fp_hint,
+int MipsMir2Lir::AllocTypedTempPair(bool fp_hint,
                   int reg_class)
 {
   int high_reg;
@@ -462,69 +460,67 @@
   int res = 0;
 
   if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
-    low_reg = AllocTempDouble(cu);
+    low_reg = AllocTempDouble();
     high_reg = low_reg + 1;
     res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
     return res;
   }
 
-  low_reg = AllocTemp(cu);
-  high_reg = AllocTemp(cu);
+  low_reg = AllocTemp();
+  high_reg = AllocTemp();
   res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
   return res;
 }
 
-int MipsCodegen::AllocTypedTemp(CompilationUnit *cu, bool fp_hint, int reg_class)
+int MipsMir2Lir::AllocTypedTemp(bool fp_hint, int reg_class)
 {
   if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg))
 {
-    return AllocTempFloat(cu);
+    return AllocTempFloat();
 }
-  return AllocTemp(cu);
+  return AllocTemp();
 }
 
-void MipsCodegen::CompilerInitializeRegAlloc(CompilationUnit* cu)
+void MipsMir2Lir::CompilerInitializeRegAlloc()
 {
   int num_regs = sizeof(core_regs)/sizeof(*core_regs);
   int num_reserved = sizeof(ReservedRegs)/sizeof(*ReservedRegs);
   int num_temps = sizeof(core_temps)/sizeof(*core_temps);
   int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
   int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
-  RegisterPool *pool =
-      static_cast<RegisterPool*>(NewMem(cu, sizeof(*pool), true, kAllocRegAlloc));
-  cu->reg_pool = pool;
-  pool->num_core_regs = num_regs;
-  pool->core_regs = static_cast<RegisterInfo*>
-     (NewMem(cu, num_regs * sizeof(*cu->reg_pool->core_regs), true, kAllocRegAlloc));
-  pool->num_fp_regs = num_fp_regs;
-  pool->FPRegs = static_cast<RegisterInfo*>
-      (NewMem(cu, num_fp_regs * sizeof(*cu->reg_pool->FPRegs), true, kAllocRegAlloc));
-  CompilerInitPool(pool->core_regs, core_regs, pool->num_core_regs);
-  CompilerInitPool(pool->FPRegs, FpRegs, pool->num_fp_regs);
+  reg_pool_ = static_cast<RegisterPool*>(NewMem(cu_, sizeof(*reg_pool_), true, kAllocRegAlloc));
+  reg_pool_->num_core_regs = num_regs;
+  reg_pool_->core_regs = static_cast<RegisterInfo*>
+     (NewMem(cu_, num_regs * sizeof(*reg_pool_->core_regs), true, kAllocRegAlloc));
+  reg_pool_->num_fp_regs = num_fp_regs;
+  reg_pool_->FPRegs = static_cast<RegisterInfo*>
+      (NewMem(cu_, num_fp_regs * sizeof(*reg_pool_->FPRegs), true, kAllocRegAlloc));
+  CompilerInitPool(reg_pool_->core_regs, core_regs, reg_pool_->num_core_regs);
+  CompilerInitPool(reg_pool_->FPRegs, FpRegs, reg_pool_->num_fp_regs);
   // Keep special registers from being allocated
   for (int i = 0; i < num_reserved; i++) {
     if (NO_SUSPEND && (ReservedRegs[i] == rMIPS_SUSPEND)) {
       //To measure cost of suspend check
       continue;
     }
-    MarkInUse(cu, ReservedRegs[i]);
+    MarkInUse(ReservedRegs[i]);
   }
   // Mark temp regs - all others not in use can be used for promotion
   for (int i = 0; i < num_temps; i++) {
-    MarkTemp(cu, core_temps[i]);
+    MarkTemp(core_temps[i]);
   }
   for (int i = 0; i < num_fp_temps; i++) {
-    MarkTemp(cu, fp_temps[i]);
+    MarkTemp(fp_temps[i]);
   }
 }
 
-void MipsCodegen::FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep, RegLocation rl_free)
+void MipsMir2Lir::FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free)
 {
   if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
     (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) {
     // No overlap, free both
-    FreeTemp(cu, rl_free.low_reg);
-    FreeTemp(cu, rl_free.high_reg);
+    FreeTemp(rl_free.low_reg);
+    FreeTemp(rl_free.high_reg);
   }
 }
 /*
@@ -533,76 +529,76 @@
  * ensure that all branch instructions can be restarted if
  * there is a trap in the shadow.  Allocate a temp register.
  */
-int MipsCodegen::LoadHelper(CompilationUnit* cu, int offset)
+int MipsMir2Lir::LoadHelper(int offset)
 {
-  LoadWordDisp(cu, rMIPS_SELF, offset, r_T9);
+  LoadWordDisp(rMIPS_SELF, offset, r_T9);
   return r_T9;
 }
 
-void MipsCodegen::SpillCoreRegs(CompilationUnit* cu)
+void MipsMir2Lir::SpillCoreRegs()
 {
-  if (cu->num_core_spills == 0) {
+  if (num_core_spills_ == 0) {
     return;
   }
-  uint32_t mask = cu->core_spill_mask;
-  int offset = cu->num_core_spills * 4;
-  OpRegImm(cu, kOpSub, rMIPS_SP, offset);
+  uint32_t mask = core_spill_mask_;
+  int offset = num_core_spills_ * 4;
+  OpRegImm(kOpSub, rMIPS_SP, offset);
   for (int reg = 0; mask; mask >>= 1, reg++) {
     if (mask & 0x1) {
       offset -= 4;
-      StoreWordDisp(cu, rMIPS_SP, offset, reg);
+      StoreWordDisp(rMIPS_SP, offset, reg);
     }
   }
 }
 
-void MipsCodegen::UnSpillCoreRegs(CompilationUnit* cu)
+void MipsMir2Lir::UnSpillCoreRegs()
 {
-  if (cu->num_core_spills == 0) {
+  if (num_core_spills_ == 0) {
     return;
   }
-  uint32_t mask = cu->core_spill_mask;
-  int offset = cu->frame_size;
+  uint32_t mask = core_spill_mask_;
+  int offset = frame_size_;
   for (int reg = 0; mask; mask >>= 1, reg++) {
     if (mask & 0x1) {
       offset -= 4;
-      LoadWordDisp(cu, rMIPS_SP, offset, reg);
+      LoadWordDisp(rMIPS_SP, offset, reg);
     }
   }
-  OpRegImm(cu, kOpAdd, rMIPS_SP, cu->frame_size);
+  OpRegImm(kOpAdd, rMIPS_SP, frame_size_);
 }
 
-bool MipsCodegen::IsUnconditionalBranch(LIR* lir)
+bool MipsMir2Lir::IsUnconditionalBranch(LIR* lir)
 {
   return (lir->opcode == kMipsB);
 }
 
-/* Common initialization routine for an architecture family */
-bool InitMipsCodegen(CompilationUnit* cu)
-{
-  cu->cg.reset(new MipsCodegen());
+MipsMir2Lir::MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph) : Mir2Lir(cu, mir_graph) {
   for (int i = 0; i < kMipsLast; i++) {
-    if (MipsCodegen::EncodingMap[i].opcode != i) {
-      LOG(FATAL) << "Encoding order for " << MipsCodegen::EncodingMap[i].name
+    if (MipsMir2Lir::EncodingMap[i].opcode != i) {
+      LOG(FATAL) << "Encoding order for " << MipsMir2Lir::EncodingMap[i].name
                  << " is wrong: expecting " << i << ", seeing "
-                 << static_cast<int>(MipsCodegen::EncodingMap[i].opcode);
+                 << static_cast<int>(MipsMir2Lir::EncodingMap[i].opcode);
     }
   }
-  return true;
 }
 
-uint64_t MipsCodegen::GetTargetInstFlags(int opcode)
-{
-  return MipsCodegen::EncodingMap[opcode].flags;
+Mir2Lir* MipsCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph) {
+  return new MipsMir2Lir(cu, mir_graph);
 }
 
-const char* MipsCodegen::GetTargetInstName(int opcode)
+uint64_t MipsMir2Lir::GetTargetInstFlags(int opcode)
 {
-  return MipsCodegen::EncodingMap[opcode].name;
+  return MipsMir2Lir::EncodingMap[opcode].flags;
 }
 
-const char* MipsCodegen::GetTargetInstFmt(int opcode)
+const char* MipsMir2Lir::GetTargetInstName(int opcode)
 {
-  return MipsCodegen::EncodingMap[opcode].fmt;
+  return MipsMir2Lir::EncodingMap[opcode].name;
+}
+
+const char* MipsMir2Lir::GetTargetInstFmt(int opcode)
+{
+  return MipsMir2Lir::EncodingMap[opcode].fmt;
 }
 
 } // namespace art
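
With InitMipsCodegen gone, the target no longer installs itself into cu->cg; it exposes a plain factory returning the common Mir2Lir interface, and the EncodingMap sanity check moves into the MipsMir2Lir constructor. A hedged sketch of how a driver could dispatch on instruction set (only MipsCodeGenerator is visible in this diff; the Arm and X86 names are assumed by symmetry and left as declarations here):

    class CompilationUnit;
    class MIRGraph;
    class Mir2Lir;

    enum InstructionSet { kThumb2, kX86, kMips };  // simplified stand-in for ART's enum

    // Per-target factories. Only the MIPS one appears in this CL's hunks;
    // the other two are assumed by symmetry with the Arm/X86 backends.
    Mir2Lir* MipsCodeGenerator(CompilationUnit* cu, MIRGraph* mir_graph);
    Mir2Lir* ArmCodeGenerator(CompilationUnit* cu, MIRGraph* mir_graph);   // assumed
    Mir2Lir* X86CodeGenerator(CompilationUnit* cu, MIRGraph* mir_graph);   // assumed

    Mir2Lir* CreateCodegen(InstructionSet set, CompilationUnit* cu, MIRGraph* mir_graph) {
      switch (set) {
        case kMips:   return MipsCodeGenerator(cu, mir_graph);
        case kThumb2: return ArmCodeGenerator(cu, mir_graph);
        case kX86:    return X86CodeGenerator(cu, mir_graph);
      }
      return nullptr;
    }

Returning the instance rather than mutating the CompilationUnit keeps the ownership decision with the caller.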
diff --git a/src/compiler/dex/quick/mips/utility_mips.cc b/src/compiler/dex/quick/mips/utility_mips.cc
index 168eb68..5f9f8c5 100644
--- a/src/compiler/dex/quick/mips/utility_mips.cc
+++ b/src/compiler/dex/quick/mips/utility_mips.cc
@@ -15,14 +15,12 @@
  */
 
 #include "codegen_mips.h"
-#include "compiler/dex/quick/codegen_util.h"
-#include "compiler/dex/quick/ralloc_util.h"
 #include "mips_lir.h"
 
 namespace art {
 
 /* This file contains codegen for the MIPS32 ISA. */
-LIR* MipsCodegen::OpFpRegCopy(CompilationUnit *cu, int r_dest, int r_src)
+LIR* MipsMir2Lir::OpFpRegCopy(int r_dest, int r_src)
 {
   int opcode;
   /* must be both DOUBLE or both not DOUBLE */
@@ -45,29 +43,29 @@
       opcode = kMipsMfc1;
     }
   }
-  LIR* res = RawLIR(cu, cu->current_dalvik_offset, opcode, r_src, r_dest);
-  if (!(cu->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
+  LIR* res = RawLIR(current_dalvik_offset_, opcode, r_src, r_dest);
+  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
     res->flags.is_nop = true;
   }
   return res;
 }
 
-bool MipsCodegen::InexpensiveConstantInt(int32_t value)
+bool MipsMir2Lir::InexpensiveConstantInt(int32_t value)
 {
   return ((value == 0) || IsUint(16, value) || ((value < 0) && (value >= -32768)));
 }
 
-bool MipsCodegen::InexpensiveConstantFloat(int32_t value)
+bool MipsMir2Lir::InexpensiveConstantFloat(int32_t value)
 {
   return false;  // TUNING
 }
 
-bool MipsCodegen::InexpensiveConstantLong(int64_t value)
+bool MipsMir2Lir::InexpensiveConstantLong(int64_t value)
 {
   return false;  // TUNING
 }
 
-bool MipsCodegen::InexpensiveConstantDouble(int64_t value)
+bool MipsMir2Lir::InexpensiveConstantDouble(int64_t value)
 {
   return false; // TUNING
 }
@@ -81,7 +79,7 @@
  * 1) r_dest is freshly returned from AllocTemp or
  * 2) The codegen is under fixed register usage
  */
-LIR* MipsCodegen::LoadConstantNoClobber(CompilationUnit *cu, int r_dest, int value)
+LIR* MipsMir2Lir::LoadConstantNoClobber(int r_dest, int value)
 {
   LIR *res;
 
@@ -89,38 +87,38 @@
   int is_fp_reg = MIPS_FPREG(r_dest);
   if (is_fp_reg) {
     DCHECK(MIPS_SINGLEREG(r_dest));
-    r_dest = AllocTemp(cu);
+    r_dest = AllocTemp();
   }
 
   /* See if the value can be constructed cheaply */
   if (value == 0) {
-    res = NewLIR2(cu, kMipsMove, r_dest, r_ZERO);
+    res = NewLIR2(kMipsMove, r_dest, r_ZERO);
   } else if ((value > 0) && (value <= 65535)) {
-    res = NewLIR3(cu, kMipsOri, r_dest, r_ZERO, value);
+    res = NewLIR3(kMipsOri, r_dest, r_ZERO, value);
   } else if ((value < 0) && (value >= -32768)) {
-    res = NewLIR3(cu, kMipsAddiu, r_dest, r_ZERO, value);
+    res = NewLIR3(kMipsAddiu, r_dest, r_ZERO, value);
   } else {
-    res = NewLIR2(cu, kMipsLui, r_dest, value>>16);
+    res = NewLIR2(kMipsLui, r_dest, value>>16);
     if (value & 0xffff)
-      NewLIR3(cu, kMipsOri, r_dest, r_dest, value);
+      NewLIR3(kMipsOri, r_dest, r_dest, value);
   }
 
   if (is_fp_reg) {
-    NewLIR2(cu, kMipsMtc1, r_dest, r_dest_save);
-    FreeTemp(cu, r_dest);
+    NewLIR2(kMipsMtc1, r_dest, r_dest_save);
+    FreeTemp(r_dest);
   }
 
   return res;
 }
 
-LIR* MipsCodegen::OpUnconditionalBranch(CompilationUnit* cu, LIR* target)
+LIR* MipsMir2Lir::OpUnconditionalBranch(LIR* target)
 {
-  LIR* res = NewLIR1(cu, kMipsB, 0 /* offset to be patched during assembly*/ );
+  LIR* res = NewLIR1(kMipsB, 0 /* offset to be patched during assembly */);
   res->target = target;
   return res;
 }
 
-LIR* MipsCodegen::OpReg(CompilationUnit *cu, OpKind op, int r_dest_src)
+LIR* MipsMir2Lir::OpReg(OpKind op, int r_dest_src)
 {
   MipsOpCode opcode = kMipsNop;
   switch (op) {
@@ -128,15 +126,15 @@
       opcode = kMipsJalr;
       break;
     case kOpBx:
-      return NewLIR1(cu, kMipsJr, r_dest_src);
+      return NewLIR1(kMipsJr, r_dest_src);
       break;
     default:
       LOG(FATAL) << "Bad case in OpReg";
   }
-  return NewLIR2(cu, opcode, r_RA, r_dest_src);
+  return NewLIR2(opcode, r_RA, r_dest_src);
 }
 
-LIR* MipsCodegen::OpRegImm(CompilationUnit *cu, OpKind op, int r_dest_src1,
+LIR* MipsMir2Lir::OpRegImm(OpKind op, int r_dest_src1,
           int value)
 {
   LIR *res;
@@ -146,29 +144,29 @@
   MipsOpCode opcode = kMipsNop;
   switch (op) {
     case kOpAdd:
-      return OpRegRegImm(cu, op, r_dest_src1, r_dest_src1, value);
+      return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
       break;
     case kOpSub:
-      return OpRegRegImm(cu, op, r_dest_src1, r_dest_src1, value);
+      return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
       break;
     default:
       LOG(FATAL) << "Bad case in OpRegImm";
       break;
   }
   if (short_form)
-    res = NewLIR2(cu, opcode, r_dest_src1, abs_value);
+    res = NewLIR2(opcode, r_dest_src1, abs_value);
   else {
-    int r_scratch = AllocTemp(cu);
-    res = LoadConstant(cu, r_scratch, value);
+    int r_scratch = AllocTemp();
+    res = LoadConstant(r_scratch, value);
     if (op == kOpCmp)
-      NewLIR2(cu, opcode, r_dest_src1, r_scratch);
+      NewLIR2(opcode, r_dest_src1, r_scratch);
     else
-      NewLIR3(cu, opcode, r_dest_src1, r_dest_src1, r_scratch);
+      NewLIR3(opcode, r_dest_src1, r_dest_src1, r_scratch);
   }
   return res;
 }
 
-LIR* MipsCodegen::OpRegRegReg(CompilationUnit *cu, OpKind op, int r_dest, int r_src1, int r_src2)
+LIR* MipsMir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2)
 {
   MipsOpCode opcode = kMipsNop;
   switch (op) {
@@ -207,10 +205,10 @@
       LOG(FATAL) << "bad case in OpRegRegReg";
       break;
   }
-  return NewLIR3(cu, opcode, r_dest, r_src1, r_src2);
+  return NewLIR3(opcode, r_dest, r_src1, r_src2);
 }
 
-LIR* MipsCodegen::OpRegRegImm(CompilationUnit *cu, OpKind op, int r_dest, int r_src1, int value)
+LIR* MipsMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value)
 {
   LIR *res;
   MipsOpCode opcode = kMipsNop;
@@ -285,21 +283,21 @@
   }
 
   if (short_form)
-    res = NewLIR3(cu, opcode, r_dest, r_src1, value);
+    res = NewLIR3(opcode, r_dest, r_src1, value);
   else {
     if (r_dest != r_src1) {
-      res = LoadConstant(cu, r_dest, value);
-      NewLIR3(cu, opcode, r_dest, r_src1, r_dest);
+      res = LoadConstant(r_dest, value);
+      NewLIR3(opcode, r_dest, r_src1, r_dest);
     } else {
-      int r_scratch = AllocTemp(cu);
-      res = LoadConstant(cu, r_scratch, value);
-      NewLIR3(cu, opcode, r_dest, r_src1, r_scratch);
+      int r_scratch = AllocTemp();
+      res = LoadConstant(r_scratch, value);
+      NewLIR3(opcode, r_dest, r_src1, r_scratch);
     }
   }
   return res;
 }
 
-LIR* MipsCodegen::OpRegReg(CompilationUnit *cu, OpKind op, int r_dest_src1, int r_src2)
+LIR* MipsMir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2)
 {
   MipsOpCode opcode = kMipsNop;
   LIR *res;
@@ -308,57 +306,57 @@
       opcode = kMipsMove;
       break;
     case kOpMvn:
-      return NewLIR3(cu, kMipsNor, r_dest_src1, r_src2, r_ZERO);
+      return NewLIR3(kMipsNor, r_dest_src1, r_src2, r_ZERO);
     case kOpNeg:
-      return NewLIR3(cu, kMipsSubu, r_dest_src1, r_ZERO, r_src2);
+      return NewLIR3(kMipsSubu, r_dest_src1, r_ZERO, r_src2);
     case kOpAdd:
     case kOpAnd:
     case kOpMul:
     case kOpOr:
     case kOpSub:
     case kOpXor:
-      return OpRegRegReg(cu, op, r_dest_src1, r_dest_src1, r_src2);
+      return OpRegRegReg(op, r_dest_src1, r_dest_src1, r_src2);
     case kOp2Byte:
 #if __mips_isa_rev>=2
-      res = NewLIR2(cu, kMipsSeb, r_dest_src1, r_src2);
+      res = NewLIR2(kMipsSeb, r_dest_src1, r_src2);
 #else
-      res = OpRegRegImm(cu, kOpLsl, r_dest_src1, r_src2, 24);
-      OpRegRegImm(cu, kOpAsr, r_dest_src1, r_dest_src1, 24);
+      res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 24);
+      OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 24);
 #endif
       return res;
     case kOp2Short:
 #if __mips_isa_rev>=2
-      res = NewLIR2(cu, kMipsSeh, r_dest_src1, r_src2);
+      res = NewLIR2(kMipsSeh, r_dest_src1, r_src2);
 #else
-      res = OpRegRegImm(cu, kOpLsl, r_dest_src1, r_src2, 16);
-      OpRegRegImm(cu, kOpAsr, r_dest_src1, r_dest_src1, 16);
+      res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 16);
+      OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 16);
 #endif
       return res;
     case kOp2Char:
-       return NewLIR3(cu, kMipsAndi, r_dest_src1, r_src2, 0xFFFF);
+       return NewLIR3(kMipsAndi, r_dest_src1, r_src2, 0xFFFF);
     default:
       LOG(FATAL) << "Bad case in OpRegReg";
       break;
   }
-  return NewLIR2(cu, opcode, r_dest_src1, r_src2);
+  return NewLIR2(opcode, r_dest_src1, r_src2);
 }
 
-LIR* MipsCodegen::LoadConstantWide(CompilationUnit *cu, int r_dest_lo, int r_dest_hi, int64_t value)
+LIR* MipsMir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value)
 {
   LIR *res;
-  res = LoadConstantNoClobber(cu, r_dest_lo, Low32Bits(value));
-  LoadConstantNoClobber(cu, r_dest_hi, High32Bits(value));
+  res = LoadConstantNoClobber(r_dest_lo, Low32Bits(value));
+  LoadConstantNoClobber(r_dest_hi, High32Bits(value));
   return res;
 }
 
 /* Load value from base + scaled index. */
-LIR* MipsCodegen::LoadBaseIndexed(CompilationUnit *cu, int rBase, int r_index, int r_dest,
+LIR* MipsMir2Lir::LoadBaseIndexed(int rBase, int r_index, int r_dest,
                                   int scale, OpSize size)
 {
   LIR *first = NULL;
   LIR *res;
   MipsOpCode opcode = kMipsNop;
-  int t_reg = AllocTemp(cu);
+  int t_reg = AllocTemp();
 
   if (MIPS_FPREG(r_dest)) {
     DCHECK(MIPS_SINGLEREG(r_dest));
@@ -370,10 +368,10 @@
   }
 
   if (!scale) {
-    first = NewLIR3(cu, kMipsAddu, t_reg , rBase, r_index);
+    first = NewLIR3(kMipsAddu, t_reg, rBase, r_index);
   } else {
-    first = OpRegRegImm(cu, kOpLsl, t_reg, r_index, scale);
-    NewLIR3(cu, kMipsAddu, t_reg , rBase, t_reg);
+    first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
+    NewLIR3(kMipsAddu, t_reg, rBase, t_reg);
   }
 
   switch (size) {
@@ -399,19 +397,19 @@
       LOG(FATAL) << "Bad case in LoadBaseIndexed";
   }
 
-  res = NewLIR3(cu, opcode, r_dest, 0, t_reg);
-  FreeTemp(cu, t_reg);
+  res = NewLIR3(opcode, r_dest, 0, t_reg);
+  FreeTemp(t_reg);
   return (first) ? first : res;
 }
 
 /* Store value to base + scaled index. */
-LIR* MipsCodegen::StoreBaseIndexed(CompilationUnit *cu, int rBase, int r_index, int r_src,
+LIR* MipsMir2Lir::StoreBaseIndexed(int rBase, int r_index, int r_src,
                                    int scale, OpSize size)
 {
   LIR *first = NULL;
   MipsOpCode opcode = kMipsNop;
   int r_new_index = r_index;
-  int t_reg = AllocTemp(cu);
+  int t_reg = AllocTemp();
 
   if (MIPS_FPREG(r_src)) {
     DCHECK(MIPS_SINGLEREG(r_src));
@@ -423,10 +421,10 @@
   }
 
   if (!scale) {
-    first = NewLIR3(cu, kMipsAddu, t_reg , rBase, r_index);
+    first = NewLIR3(kMipsAddu, t_reg, rBase, r_index);
   } else {
-    first = OpRegRegImm(cu, kOpLsl, t_reg, r_index, scale);
-    NewLIR3(cu, kMipsAddu, t_reg , rBase, t_reg);
+    first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
+    NewLIR3(kMipsAddu, t_reg, rBase, t_reg);
   }
 
   switch (size) {
@@ -447,12 +445,12 @@
     default:
       LOG(FATAL) << "Bad case in StoreBaseIndexed";
   }
-  NewLIR3(cu, opcode, r_src, 0, t_reg);
-  FreeTemp(cu, r_new_index);
+  NewLIR3(opcode, r_src, 0, t_reg);
+  FreeTemp(r_new_index);
   return first;
 }
 
-LIR* MipsCodegen::LoadBaseDispBody(CompilationUnit *cu, int rBase, int displacement, int r_dest,
+LIR* MipsMir2Lir::LoadBaseDispBody(int rBase, int displacement, int r_dest,
                                    int r_dest_hi, OpSize size, int s_reg)
 /*
  * Load value from base + displacement.  Optionally perform null check
@@ -517,54 +515,54 @@
 
   if (short_form) {
     if (!pair) {
-      load = res = NewLIR3(cu, opcode, r_dest, displacement, rBase);
+      load = res = NewLIR3(opcode, r_dest, displacement, rBase);
     } else {
-      load = res = NewLIR3(cu, opcode, r_dest,
+      load = res = NewLIR3(opcode, r_dest,
                            displacement + LOWORD_OFFSET, rBase);
-      load2 = NewLIR3(cu, opcode, r_dest_hi,
+      load2 = NewLIR3(opcode, r_dest_hi,
                       displacement + HIWORD_OFFSET, rBase);
     }
   } else {
     if (pair) {
-      int r_tmp = AllocFreeTemp(cu);
-      res = OpRegRegImm(cu, kOpAdd, r_tmp, rBase, displacement);
-      load = NewLIR3(cu, opcode, r_dest, LOWORD_OFFSET, r_tmp);
-      load2 = NewLIR3(cu, opcode, r_dest_hi, HIWORD_OFFSET, r_tmp);
-      FreeTemp(cu, r_tmp);
+      int r_tmp = AllocFreeTemp();
+      res = OpRegRegImm(kOpAdd, r_tmp, rBase, displacement);
+      load = NewLIR3(opcode, r_dest, LOWORD_OFFSET, r_tmp);
+      load2 = NewLIR3(opcode, r_dest_hi, HIWORD_OFFSET, r_tmp);
+      FreeTemp(r_tmp);
     } else {
-      int r_tmp = (rBase == r_dest) ? AllocFreeTemp(cu) : r_dest;
-      res = OpRegRegImm(cu, kOpAdd, r_tmp, rBase, displacement);
-      load = NewLIR3(cu, opcode, r_dest, 0, r_tmp);
+      int r_tmp = (rBase == r_dest) ? AllocFreeTemp() : r_dest;
+      res = OpRegRegImm(kOpAdd, r_tmp, rBase, displacement);
+      load = NewLIR3(opcode, r_dest, 0, r_tmp);
       if (r_tmp != r_dest)
-        FreeTemp(cu, r_tmp);
+        FreeTemp(r_tmp);
     }
   }
 
   if (rBase == rMIPS_SP) {
-    AnnotateDalvikRegAccess(cu, load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
+    AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                             true /* is_load */, pair /* is64bit */);
     if (pair) {
-      AnnotateDalvikRegAccess(cu, load2, (displacement + HIWORD_OFFSET) >> 2,
+      AnnotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
                               true /* is_load */, pair /* is64bit */);
     }
   }
   return load;
 }
 
-LIR* MipsCodegen::LoadBaseDisp(CompilationUnit *cu, int rBase, int displacement, int r_dest,
+LIR* MipsMir2Lir::LoadBaseDisp(int rBase, int displacement, int r_dest,
                                OpSize size, int s_reg)
 {
-  return LoadBaseDispBody(cu, rBase, displacement, r_dest, -1,
+  return LoadBaseDispBody(rBase, displacement, r_dest, -1,
                           size, s_reg);
 }
 
-LIR* MipsCodegen::LoadBaseDispWide(CompilationUnit *cu, int rBase, int displacement,
+LIR* MipsMir2Lir::LoadBaseDispWide(int rBase, int displacement,
                                    int r_dest_lo, int r_dest_hi, int s_reg)
 {
-  return LoadBaseDispBody(cu, rBase, displacement, r_dest_lo, r_dest_hi, kLong, s_reg);
+  return LoadBaseDispBody(rBase, displacement, r_dest_lo, r_dest_hi, kLong, s_reg);
 }
 
-LIR* MipsCodegen::StoreBaseDispBody(CompilationUnit *cu, int rBase, int displacement,
+LIR* MipsMir2Lir::StoreBaseDispBody(int rBase, int displacement,
                                     int r_src, int r_src_hi, OpSize size)
 {
   LIR *res;
@@ -616,30 +614,30 @@
 
   if (short_form) {
     if (!pair) {
-      store = res = NewLIR3(cu, opcode, r_src, displacement, rBase);
+      store = res = NewLIR3(opcode, r_src, displacement, rBase);
     } else {
-      store = res = NewLIR3(cu, opcode, r_src, displacement + LOWORD_OFFSET,
+      store = res = NewLIR3(opcode, r_src, displacement + LOWORD_OFFSET,
                             rBase);
-      store2 = NewLIR3(cu, opcode, r_src_hi, displacement + HIWORD_OFFSET,
+      store2 = NewLIR3(opcode, r_src_hi, displacement + HIWORD_OFFSET,
                        rBase);
     }
   } else {
-    int r_scratch = AllocTemp(cu);
-    res = OpRegRegImm(cu, kOpAdd, r_scratch, rBase, displacement);
+    int r_scratch = AllocTemp();
+    res = OpRegRegImm(kOpAdd, r_scratch, rBase, displacement);
     if (!pair) {
-      store =  NewLIR3(cu, opcode, r_src, 0, r_scratch);
+      store =  NewLIR3(opcode, r_src, 0, r_scratch);
     } else {
-      store =  NewLIR3(cu, opcode, r_src, LOWORD_OFFSET, r_scratch);
-      store2 = NewLIR3(cu, opcode, r_src_hi, HIWORD_OFFSET, r_scratch);
+      store =  NewLIR3(opcode, r_src, LOWORD_OFFSET, r_scratch);
+      store2 = NewLIR3(opcode, r_src_hi, HIWORD_OFFSET, r_scratch);
     }
-    FreeTemp(cu, r_scratch);
+    FreeTemp(r_scratch);
   }
 
   if (rBase == rMIPS_SP) {
-    AnnotateDalvikRegAccess(cu, store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
+    AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                             false /* is_load */, pair /* is64bit */);
     if (pair) {
-      AnnotateDalvikRegAccess(cu, store2, (displacement + HIWORD_OFFSET) >> 2,
+      AnnotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
                               false /* is_load */, pair /* is64bit */);
     }
   }
@@ -647,56 +645,52 @@
   return res;
 }
 
-LIR* MipsCodegen::StoreBaseDisp(CompilationUnit *cu, int rBase, int displacement, int r_src,
+LIR* MipsMir2Lir::StoreBaseDisp(int rBase, int displacement, int r_src,
                                 OpSize size)
 {
-  return StoreBaseDispBody(cu, rBase, displacement, r_src, -1, size);
+  return StoreBaseDispBody(rBase, displacement, r_src, -1, size);
 }
 
-LIR* MipsCodegen::StoreBaseDispWide(CompilationUnit *cu, int rBase, int displacement,
+LIR* MipsMir2Lir::StoreBaseDispWide(int rBase, int displacement,
                                     int r_src_lo, int r_src_hi)
 {
-  return StoreBaseDispBody(cu, rBase, displacement, r_src_lo, r_src_hi, kLong);
+  return StoreBaseDispBody(rBase, displacement, r_src_lo, r_src_hi, kLong);
 }
 
-LIR* MipsCodegen::OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset)
+LIR* MipsMir2Lir::OpThreadMem(OpKind op, int thread_offset)
 {
   LOG(FATAL) << "Unexpected use of OpThreadMem for MIPS";
   return NULL;
 }
 
-LIR* MipsCodegen::OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp)
+LIR* MipsMir2Lir::OpMem(OpKind op, int rBase, int disp)
 {
   LOG(FATAL) << "Unexpected use of OpMem for MIPS";
   return NULL;
 }
 
-LIR* MipsCodegen::StoreBaseIndexedDisp(CompilationUnit *cu,
-                          int rBase, int r_index, int scale, int displacement,
-                          int r_src, int r_src_hi,
-                          OpSize size, int s_reg)
+LIR* MipsMir2Lir::StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
+                                       int r_src, int r_src_hi, OpSize size, int s_reg)
 {
   LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for MIPS";
   return NULL;
 }
 
-LIR* MipsCodegen::OpRegMem(CompilationUnit *cu, OpKind op, int r_dest, int rBase,
+LIR* MipsMir2Lir::OpRegMem(OpKind op, int r_dest, int rBase,
               int offset)
 {
   LOG(FATAL) << "Unexpected use of OpRegMem for MIPS";
   return NULL;
 }
 
-LIR* MipsCodegen::LoadBaseIndexedDisp(CompilationUnit *cu,
-                         int rBase, int r_index, int scale, int displacement,
-                         int r_dest, int r_dest_hi,
-                         OpSize size, int s_reg)
+LIR* MipsMir2Lir::LoadBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
+                                      int r_dest, int r_dest_hi, OpSize size, int s_reg)
 {
   LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for MIPS";
   return NULL;
 }
 
-LIR* MipsCodegen::OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target)
+LIR* MipsMir2Lir::OpCondBranch(ConditionCode cc, LIR* target)
 {
   LOG(FATAL) << "Unexpected use of OpCondBranch for MIPS";
   return NULL;
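
LoadConstantNoClobber above picks the cheapest MIPS sequence for a 32-bit immediate: a move from $zero for 0, a single ori for small positives, addiu for small negatives, and otherwise lui for the high half plus an ori for the low half when it is nonzero. The same selection logic as a standalone sketch that prints mnemonics instead of building LIR nodes (the function name and output format are illustrative; the real code also passes the unmasked value to the ori and lets the assembler truncate it, where this sketch masks explicitly):

    #include <cstdint>
    #include <cstdio>

    // Mirrors the constant-materialization cases in MipsMir2Lir::LoadConstantNoClobber.
    void EmitLoadConstant(const char* r_dest, int32_t value) {
      if (value == 0) {
        std::printf("move  %s, $zero\n", r_dest);
      } else if ((value > 0) && (value <= 65535)) {
        std::printf("ori   %s, $zero, %d\n", r_dest, value);
      } else if ((value < 0) && (value >= -32768)) {
        std::printf("addiu %s, $zero, %d\n", r_dest, value);
      } else {
        std::printf("lui   %s, 0x%x\n", r_dest, static_cast<unsigned>(value >> 16) & 0xffff);
        if (value & 0xffff) {
          std::printf("ori   %s, %s, 0x%x\n", r_dest, r_dest, static_cast<unsigned>(value) & 0xffff);
        }
      }
    }

    int main() {
      EmitLoadConstant("$t0", 0x12345678);  // lui $t0, 0x1234; ori $t0, $t0, 0x5678
      EmitLoadConstant("$t1", 42);          // ori $t1, $zero, 42
      EmitLoadConstant("$t2", -5);          // addiu $t2, $zero, -5
      return 0;
    }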
diff --git a/src/compiler/dex/quick/mir_to_lir.cc b/src/compiler/dex/quick/mir_to_lir.cc
index 7aec54e..1f50914 100644
--- a/src/compiler/dex/quick/mir_to_lir.cc
+++ b/src/compiler/dex/quick/mir_to_lir.cc
@@ -18,9 +18,6 @@
 
 #include "compiler/dex/compiler_internals.h"
 #include "compiler/dex/dataflow_iterator.h"
-#include "local_optimizations.h"
-#include "codegen_util.h"
-#include "ralloc_util.h"
 
 namespace art {
 
@@ -29,13 +26,11 @@
  * load/store utilities here, or target-dependent genXX() handlers
  * when necessary.
  */
-static void CompileDalvikInstruction(CompilationUnit* cu, MIR* mir, BasicBlock* bb,
-                                     LIR* label_list)
+void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list)
 {
-  Codegen* cg = cu->cg.get();
   RegLocation rl_src[3];
-  RegLocation rl_dest = GetBadLoc();
-  RegLocation rl_result = GetBadLoc();
+  RegLocation rl_dest = mir_graph_->GetBadLoc();
+  RegLocation rl_result = mir_graph_->GetBadLoc();
   Instruction::Code opcode = mir->dalvikInsn.opcode;
   int opt_flags = mir->optimization_flags;
   uint32_t vB = mir->dalvikInsn.vB;
@@ -44,38 +39,38 @@
   // Prep Src and Dest locations.
   int next_sreg = 0;
   int next_loc = 0;
-  int attrs = oat_data_flow_attributes[opcode];
-  rl_src[0] = rl_src[1] = rl_src[2] = GetBadLoc();
+  int attrs = mir_graph_->oat_data_flow_attributes_[opcode];
+  rl_src[0] = rl_src[1] = rl_src[2] = mir_graph_->GetBadLoc();
   if (attrs & DF_UA) {
     if (attrs & DF_A_WIDE) {
-      rl_src[next_loc++] = GetSrcWide(cu, mir, next_sreg);
+      rl_src[next_loc++] = mir_graph_->GetSrcWide(mir, next_sreg);
       next_sreg+= 2;
     } else {
-      rl_src[next_loc++] = GetSrc(cu, mir, next_sreg);
+      rl_src[next_loc++] = mir_graph_->GetSrc(mir, next_sreg);
       next_sreg++;
     }
   }
   if (attrs & DF_UB) {
     if (attrs & DF_B_WIDE) {
-      rl_src[next_loc++] = GetSrcWide(cu, mir, next_sreg);
+      rl_src[next_loc++] = mir_graph_->GetSrcWide(mir, next_sreg);
       next_sreg+= 2;
     } else {
-      rl_src[next_loc++] = GetSrc(cu, mir, next_sreg);
+      rl_src[next_loc++] = mir_graph_->GetSrc(mir, next_sreg);
       next_sreg++;
     }
   }
   if (attrs & DF_UC) {
     if (attrs & DF_C_WIDE) {
-      rl_src[next_loc++] = GetSrcWide(cu, mir, next_sreg);
+      rl_src[next_loc++] = mir_graph_->GetSrcWide(mir, next_sreg);
     } else {
-      rl_src[next_loc++] = GetSrc(cu, mir, next_sreg);
+      rl_src[next_loc++] = mir_graph_->GetSrc(mir, next_sreg);
     }
   }
   if (attrs & DF_DA) {
     if (attrs & DF_A_WIDE) {
-      rl_dest = GetDestWide(cu, mir);
+      rl_dest = mir_graph_->GetDestWide(mir);
     } else {
-      rl_dest = GetDest(cu, mir);
+      rl_dest = mir_graph_->GetDest(mir);
     }
   }
   switch (opcode) {
@@ -83,47 +78,46 @@
       break;
 
     case Instruction::MOVE_EXCEPTION:
-      cg->GenMoveException(cu, rl_dest);
+      GenMoveException(rl_dest);
       break;
 
     case Instruction::RETURN_VOID:
-      if (((cu->access_flags & kAccConstructor) != 0) &&
-          cu->compiler_driver->RequiresConstructorBarrier(Thread::Current(), cu->dex_file,
-                                                          cu->class_def_idx)) {
-        cg->GenMemBarrier(cu, kStoreStore);
+      if (((cu_->access_flags & kAccConstructor) != 0) &&
+          cu_->compiler_driver->RequiresConstructorBarrier(Thread::Current(), cu_->dex_file,
+                                                          cu_->class_def_idx)) {
+        GenMemBarrier(kStoreStore);
       }
-      if (!(cu->attributes & METHOD_IS_LEAF)) {
-        cg->GenSuspendTest(cu, opt_flags);
+      if (!mir_graph_->MethodIsLeaf()) {
+        GenSuspendTest(opt_flags);
       }
       break;
 
     case Instruction::RETURN:
     case Instruction::RETURN_OBJECT:
-      if (!(cu->attributes & METHOD_IS_LEAF)) {
-        cg->GenSuspendTest(cu, opt_flags);
+      if (!mir_graph_->MethodIsLeaf()) {
+        GenSuspendTest(opt_flags);
       }
-      cg->StoreValue(cu, GetReturn(cu, cu->shorty[0] == 'F'), rl_src[0]);
+      StoreValue(GetReturn(cu_->shorty[0] == 'F'), rl_src[0]);
       break;
 
     case Instruction::RETURN_WIDE:
-      if (!(cu->attributes & METHOD_IS_LEAF)) {
-        cg->GenSuspendTest(cu, opt_flags);
+      if (!mir_graph_->MethodIsLeaf()) {
+        GenSuspendTest(opt_flags);
       }
-      cg->StoreValueWide(cu, GetReturnWide(cu,
-                       cu->shorty[0] == 'D'), rl_src[0]);
+      StoreValueWide(GetReturnWide(cu_->shorty[0] == 'D'), rl_src[0]);
       break;
 
     case Instruction::MOVE_RESULT_WIDE:
       if (opt_flags & MIR_INLINED)
         break;  // Nop - combined w/ previous invoke.
-      cg->StoreValueWide(cu, rl_dest, GetReturnWide(cu, rl_dest.fp));
+      StoreValueWide(rl_dest, GetReturnWide(rl_dest.fp));
       break;
 
     case Instruction::MOVE_RESULT:
     case Instruction::MOVE_RESULT_OBJECT:
       if (opt_flags & MIR_INLINED)
         break;  // Nop - combined w/ previous invoke.
-      cg->StoreValue(cu, rl_dest, GetReturn(cu, rl_dest.fp));
+      StoreValue(rl_dest, GetReturn(rl_dest.fp));
       break;
 
     case Instruction::MOVE:
@@ -132,144 +126,144 @@
     case Instruction::MOVE_OBJECT_16:
     case Instruction::MOVE_FROM16:
     case Instruction::MOVE_OBJECT_FROM16:
-      cg->StoreValue(cu, rl_dest, rl_src[0]);
+      StoreValue(rl_dest, rl_src[0]);
       break;
 
     case Instruction::MOVE_WIDE:
     case Instruction::MOVE_WIDE_16:
     case Instruction::MOVE_WIDE_FROM16:
-      cg->StoreValueWide(cu, rl_dest, rl_src[0]);
+      StoreValueWide(rl_dest, rl_src[0]);
       break;
 
     case Instruction::CONST:
     case Instruction::CONST_4:
     case Instruction::CONST_16:
-      rl_result = EvalLoc(cu, rl_dest, kAnyReg, true);
-      cg->LoadConstantNoClobber(cu, rl_result.low_reg, vB);
-      cg->StoreValue(cu, rl_dest, rl_result);
+      rl_result = EvalLoc(rl_dest, kAnyReg, true);
+      LoadConstantNoClobber(rl_result.low_reg, vB);
+      StoreValue(rl_dest, rl_result);
       if (vB == 0) {
-        cg->Workaround7250540(cu, rl_dest, rl_result.low_reg);
+        Workaround7250540(rl_dest, rl_result.low_reg);
       }
       break;
 
     case Instruction::CONST_HIGH16:
-      rl_result = EvalLoc(cu, rl_dest, kAnyReg, true);
-      cg->LoadConstantNoClobber(cu, rl_result.low_reg, vB << 16);
-      cg->StoreValue(cu, rl_dest, rl_result);
+      rl_result = EvalLoc(rl_dest, kAnyReg, true);
+      LoadConstantNoClobber(rl_result.low_reg, vB << 16);
+      StoreValue(rl_dest, rl_result);
       if (vB == 0) {
-        cg->Workaround7250540(cu, rl_dest, rl_result.low_reg);
+        Workaround7250540(rl_dest, rl_result.low_reg);
       }
       break;
 
     case Instruction::CONST_WIDE_16:
     case Instruction::CONST_WIDE_32:
-      rl_result = EvalLoc(cu, rl_dest, kAnyReg, true);
-      cg->LoadConstantWide(cu, rl_result.low_reg, rl_result.high_reg,
+      rl_result = EvalLoc(rl_dest, kAnyReg, true);
+      LoadConstantWide(rl_result.low_reg, rl_result.high_reg,
                            static_cast<int64_t>(static_cast<int32_t>(vB)));
-      cg->StoreValueWide(cu, rl_dest, rl_result);
+      StoreValueWide(rl_dest, rl_result);
       break;
 
     case Instruction::CONST_WIDE:
-      rl_result = EvalLoc(cu, rl_dest, kAnyReg, true);
-      cg->LoadConstantWide(cu, rl_result.low_reg, rl_result.high_reg, mir->dalvikInsn.vB_wide);
-      cg->StoreValueWide(cu, rl_dest, rl_result);
+      rl_result = EvalLoc(rl_dest, kAnyReg, true);
+      LoadConstantWide(rl_result.low_reg, rl_result.high_reg, mir->dalvikInsn.vB_wide);
+      StoreValueWide(rl_dest, rl_result);
       break;
 
     case Instruction::CONST_WIDE_HIGH16:
-      rl_result = EvalLoc(cu, rl_dest, kAnyReg, true);
-      cg->LoadConstantWide(cu, rl_result.low_reg, rl_result.high_reg,
+      rl_result = EvalLoc(rl_dest, kAnyReg, true);
+      LoadConstantWide(rl_result.low_reg, rl_result.high_reg,
                            static_cast<int64_t>(vB) << 48);
-      cg->StoreValueWide(cu, rl_dest, rl_result);
+      StoreValueWide(rl_dest, rl_result);
       break;
 
     case Instruction::MONITOR_ENTER:
-      cg->GenMonitorEnter(cu, opt_flags, rl_src[0]);
+      GenMonitorEnter(opt_flags, rl_src[0]);
       break;
 
     case Instruction::MONITOR_EXIT:
-      cg->GenMonitorExit(cu, opt_flags, rl_src[0]);
+      GenMonitorExit(opt_flags, rl_src[0]);
       break;
 
     case Instruction::CHECK_CAST:
-      cg->GenCheckCast(cu, vB, rl_src[0]);
+      GenCheckCast(vB, rl_src[0]);
       break;
 
     case Instruction::INSTANCE_OF:
-      cg->GenInstanceof(cu, vC, rl_dest, rl_src[0]);
+      GenInstanceof(vC, rl_dest, rl_src[0]);
       break;
 
     case Instruction::NEW_INSTANCE:
-      cg->GenNewInstance(cu, vB, rl_dest);
+      GenNewInstance(vB, rl_dest);
       break;
 
     case Instruction::THROW:
-      cg->GenThrow(cu, rl_src[0]);
+      GenThrow(rl_src[0]);
       break;
 
     case Instruction::ARRAY_LENGTH:
       int len_offset;
       len_offset = mirror::Array::LengthOffset().Int32Value();
-      rl_src[0] = cg->LoadValue(cu, rl_src[0], kCoreReg);
-      cg->GenNullCheck(cu, rl_src[0].s_reg_low, rl_src[0].low_reg, opt_flags);
-      rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-      cg->LoadWordDisp(cu, rl_src[0].low_reg, len_offset, rl_result.low_reg);
-      cg->StoreValue(cu, rl_dest, rl_result);
+      rl_src[0] = LoadValue(rl_src[0], kCoreReg);
+      GenNullCheck(rl_src[0].s_reg_low, rl_src[0].low_reg, opt_flags);
+      rl_result = EvalLoc(rl_dest, kCoreReg, true);
+      LoadWordDisp(rl_src[0].low_reg, len_offset, rl_result.low_reg);
+      StoreValue(rl_dest, rl_result);
       break;
 
     case Instruction::CONST_STRING:
     case Instruction::CONST_STRING_JUMBO:
-      cg->GenConstString(cu, vB, rl_dest);
+      GenConstString(vB, rl_dest);
       break;
 
     case Instruction::CONST_CLASS:
-      cg->GenConstClass(cu, vB, rl_dest);
+      GenConstClass(vB, rl_dest);
       break;
 
     case Instruction::FILL_ARRAY_DATA:
-      cg->GenFillArrayData(cu, vB, rl_src[0]);
+      GenFillArrayData(vB, rl_src[0]);
       break;
 
     case Instruction::FILLED_NEW_ARRAY:
-      cg->GenFilledNewArray(cu, cg->NewMemCallInfo(cu, bb, mir, kStatic,
+      GenFilledNewArray(mir_graph_->NewMemCallInfo(bb, mir, kStatic,
                         false /* not range */));
       break;
 
     case Instruction::FILLED_NEW_ARRAY_RANGE:
-      cg->GenFilledNewArray(cu, cg->NewMemCallInfo(cu, bb, mir, kStatic,
+      GenFilledNewArray(mir_graph_->NewMemCallInfo(bb, mir, kStatic,
                         true /* range */));
       break;
 
     case Instruction::NEW_ARRAY:
-      cg->GenNewArray(cu, vC, rl_dest, rl_src[0]);
+      GenNewArray(vC, rl_dest, rl_src[0]);
       break;
 
     case Instruction::GOTO:
     case Instruction::GOTO_16:
     case Instruction::GOTO_32:
       if (bb->taken->start_offset <= mir->offset) {
-        cg->GenSuspendTestAndBranch(cu, opt_flags, &label_list[bb->taken->id]);
+        GenSuspendTestAndBranch(opt_flags, &label_list[bb->taken->id]);
       } else {
-        cg->OpUnconditionalBranch(cu, &label_list[bb->taken->id]);
+        OpUnconditionalBranch(&label_list[bb->taken->id]);
       }
       break;
 
     case Instruction::PACKED_SWITCH:
-      cg->GenPackedSwitch(cu, mir, vB, rl_src[0]);
+      GenPackedSwitch(mir, vB, rl_src[0]);
       break;
 
     case Instruction::SPARSE_SWITCH:
-      cg->GenSparseSwitch(cu, mir, vB, rl_src[0]);
+      GenSparseSwitch(mir, vB, rl_src[0]);
       break;
 
     case Instruction::CMPL_FLOAT:
     case Instruction::CMPG_FLOAT:
     case Instruction::CMPL_DOUBLE:
     case Instruction::CMPG_DOUBLE:
-      cg->GenCmpFP(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
+      GenCmpFP(opcode, rl_dest, rl_src[0], rl_src[1]);
       break;
 
     case Instruction::CMP_LONG:
-      cg->GenCmpLong(cu, rl_dest, rl_src[0], rl_src[1]);
+      GenCmpLong(rl_dest, rl_src[0], rl_src[1]);
       break;
 
     case Instruction::IF_EQ:
@@ -284,18 +278,18 @@
       backward_branch = (bb->taken->start_offset <= mir->offset);
       // Result known at compile time?
       if (rl_src[0].is_const && rl_src[1].is_const) {
-        bool is_taken = EvaluateBranch(opcode, cu->mir_graph->ConstantValue(rl_src[0].orig_sreg),
-                                       cu->mir_graph->ConstantValue(rl_src[1].orig_sreg));
+        bool is_taken = EvaluateBranch(opcode, mir_graph_->ConstantValue(rl_src[0].orig_sreg),
+                                       mir_graph_->ConstantValue(rl_src[1].orig_sreg));
         if (is_taken && backward_branch) {
-          cg->GenSuspendTest(cu, opt_flags);
+          GenSuspendTest(opt_flags);
         }
         int id = is_taken ? bb->taken->id : bb->fall_through->id;
-        cg->OpUnconditionalBranch(cu, &label_list[id]);
+        OpUnconditionalBranch(&label_list[id]);
       } else {
         if (backward_branch) {
-          cg->GenSuspendTest(cu, opt_flags);
+          GenSuspendTest(opt_flags);
         }
-        cg->GenCompareAndBranch(cu, opcode, rl_src[0], rl_src[1], taken,
+        GenCompareAndBranch(opcode, rl_src[0], rl_src[1], taken,
                                 fall_through);
       }
       break;
@@ -313,126 +307,126 @@
       backward_branch = (bb->taken->start_offset <= mir->offset);
       // Result known at compile time?
       if (rl_src[0].is_const) {
-        bool is_taken = EvaluateBranch(opcode, cu->mir_graph->ConstantValue(rl_src[0].orig_sreg), 0);
+        bool is_taken = EvaluateBranch(opcode, mir_graph_->ConstantValue(rl_src[0].orig_sreg), 0);
         if (is_taken && backward_branch) {
-          cg->GenSuspendTest(cu, opt_flags);
+          GenSuspendTest(opt_flags);
         }
         int id = is_taken ? bb->taken->id : bb->fall_through->id;
-        cg->OpUnconditionalBranch(cu, &label_list[id]);
+        OpUnconditionalBranch(&label_list[id]);
       } else {
         if (backward_branch) {
-          cg->GenSuspendTest(cu, opt_flags);
+          GenSuspendTest(opt_flags);
         }
-        cg->GenCompareZeroAndBranch(cu, opcode, rl_src[0], taken, fall_through);
+        GenCompareZeroAndBranch(opcode, rl_src[0], taken, fall_through);
       }
       break;
       }
 
     case Instruction::AGET_WIDE:
-      cg->GenArrayGet(cu, opt_flags, kLong, rl_src[0], rl_src[1], rl_dest, 3);
+      GenArrayGet(opt_flags, kLong, rl_src[0], rl_src[1], rl_dest, 3);
       break;
     case Instruction::AGET:
     case Instruction::AGET_OBJECT:
-      cg->GenArrayGet(cu, opt_flags, kWord, rl_src[0], rl_src[1], rl_dest, 2);
+      GenArrayGet(opt_flags, kWord, rl_src[0], rl_src[1], rl_dest, 2);
       break;
     case Instruction::AGET_BOOLEAN:
-      cg->GenArrayGet(cu, opt_flags, kUnsignedByte, rl_src[0], rl_src[1], rl_dest, 0);
+      GenArrayGet(opt_flags, kUnsignedByte, rl_src[0], rl_src[1], rl_dest, 0);
       break;
     case Instruction::AGET_BYTE:
-      cg->GenArrayGet(cu, opt_flags, kSignedByte, rl_src[0], rl_src[1], rl_dest, 0);
+      GenArrayGet(opt_flags, kSignedByte, rl_src[0], rl_src[1], rl_dest, 0);
       break;
     case Instruction::AGET_CHAR:
-      cg->GenArrayGet(cu, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], rl_dest, 1);
+      GenArrayGet(opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], rl_dest, 1);
       break;
     case Instruction::AGET_SHORT:
-      cg->GenArrayGet(cu, opt_flags, kSignedHalf, rl_src[0], rl_src[1], rl_dest, 1);
+      GenArrayGet(opt_flags, kSignedHalf, rl_src[0], rl_src[1], rl_dest, 1);
       break;
     case Instruction::APUT_WIDE:
-      cg->GenArrayPut(cu, opt_flags, kLong, rl_src[1], rl_src[2], rl_src[0], 3);
+      GenArrayPut(opt_flags, kLong, rl_src[1], rl_src[2], rl_src[0], 3);
       break;
     case Instruction::APUT:
-      cg->GenArrayPut(cu, opt_flags, kWord, rl_src[1], rl_src[2], rl_src[0], 2);
+      GenArrayPut(opt_flags, kWord, rl_src[1], rl_src[2], rl_src[0], 2);
       break;
     case Instruction::APUT_OBJECT:
-      cg->GenArrayObjPut(cu, opt_flags, rl_src[1], rl_src[2], rl_src[0], 2);
+      GenArrayObjPut(opt_flags, rl_src[1], rl_src[2], rl_src[0], 2);
       break;
     case Instruction::APUT_SHORT:
     case Instruction::APUT_CHAR:
-      cg->GenArrayPut(cu, opt_flags, kUnsignedHalf, rl_src[1], rl_src[2], rl_src[0], 1);
+      GenArrayPut(opt_flags, kUnsignedHalf, rl_src[1], rl_src[2], rl_src[0], 1);
       break;
     case Instruction::APUT_BYTE:
     case Instruction::APUT_BOOLEAN:
-      cg->GenArrayPut(cu, opt_flags, kUnsignedByte, rl_src[1], rl_src[2],
+      GenArrayPut(opt_flags, kUnsignedByte, rl_src[1], rl_src[2],
             rl_src[0], 0);
       break;
 
     case Instruction::IGET_OBJECT:
-      cg->GenIGet(cu, vC, opt_flags, kWord, rl_dest, rl_src[0], false, true);
+      GenIGet(vC, opt_flags, kWord, rl_dest, rl_src[0], false, true);
       break;
 
     case Instruction::IGET_WIDE:
-      cg->GenIGet(cu, vC, opt_flags, kLong, rl_dest, rl_src[0], true, false);
+      GenIGet(vC, opt_flags, kLong, rl_dest, rl_src[0], true, false);
       break;
 
     case Instruction::IGET:
-      cg->GenIGet(cu, vC, opt_flags, kWord, rl_dest, rl_src[0], false, false);
+      GenIGet(vC, opt_flags, kWord, rl_dest, rl_src[0], false, false);
       break;
 
     case Instruction::IGET_CHAR:
-      cg->GenIGet(cu, vC, opt_flags, kUnsignedHalf, rl_dest, rl_src[0], false, false);
+      GenIGet(vC, opt_flags, kUnsignedHalf, rl_dest, rl_src[0], false, false);
       break;
 
     case Instruction::IGET_SHORT:
-      cg->GenIGet(cu, vC, opt_flags, kSignedHalf, rl_dest, rl_src[0], false, false);
+      GenIGet(vC, opt_flags, kSignedHalf, rl_dest, rl_src[0], false, false);
       break;
 
     case Instruction::IGET_BOOLEAN:
     case Instruction::IGET_BYTE:
-      cg->GenIGet(cu, vC, opt_flags, kUnsignedByte, rl_dest, rl_src[0], false, false);
+      GenIGet(vC, opt_flags, kUnsignedByte, rl_dest, rl_src[0], false, false);
       break;
 
     case Instruction::IPUT_WIDE:
-      cg->GenIPut(cu, vC, opt_flags, kLong, rl_src[0], rl_src[1], true, false);
+      GenIPut(vC, opt_flags, kLong, rl_src[0], rl_src[1], true, false);
       break;
 
     case Instruction::IPUT_OBJECT:
-      cg->GenIPut(cu, vC, opt_flags, kWord, rl_src[0], rl_src[1], false, true);
+      GenIPut(vC, opt_flags, kWord, rl_src[0], rl_src[1], false, true);
       break;
 
     case Instruction::IPUT:
-      cg->GenIPut(cu, vC, opt_flags, kWord, rl_src[0], rl_src[1], false, false);
+      GenIPut(vC, opt_flags, kWord, rl_src[0], rl_src[1], false, false);
       break;
 
     case Instruction::IPUT_BOOLEAN:
     case Instruction::IPUT_BYTE:
-      cg->GenIPut(cu, vC, opt_flags, kUnsignedByte, rl_src[0], rl_src[1], false, false);
+      GenIPut(vC, opt_flags, kUnsignedByte, rl_src[0], rl_src[1], false, false);
       break;
 
     case Instruction::IPUT_CHAR:
-      cg->GenIPut(cu, vC, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], false, false);
+      GenIPut(vC, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], false, false);
       break;
 
     case Instruction::IPUT_SHORT:
-      cg->GenIPut(cu, vC, opt_flags, kSignedHalf, rl_src[0], rl_src[1], false, false);
+      GenIPut(vC, opt_flags, kSignedHalf, rl_src[0], rl_src[1], false, false);
       break;
 
     case Instruction::SGET_OBJECT:
-      cg->GenSget(cu, vB, rl_dest, false, true);
+      GenSget(vB, rl_dest, false, true);
       break;
     case Instruction::SGET:
     case Instruction::SGET_BOOLEAN:
     case Instruction::SGET_BYTE:
     case Instruction::SGET_CHAR:
     case Instruction::SGET_SHORT:
-      cg->GenSget(cu, vB, rl_dest, false, false);
+      GenSget(vB, rl_dest, false, false);
       break;
 
     case Instruction::SGET_WIDE:
-      cg->GenSget(cu, vB, rl_dest, true, false);
+      GenSget(vB, rl_dest, true, false);
       break;
 
     case Instruction::SPUT_OBJECT:
-      cg->GenSput(cu, vB, rl_src[0], false, true);
+      GenSput(vB, rl_src[0], false, true);
       break;
 
     case Instruction::SPUT:
@@ -440,80 +434,80 @@
     case Instruction::SPUT_BYTE:
     case Instruction::SPUT_CHAR:
     case Instruction::SPUT_SHORT:
-      cg->GenSput(cu, vB, rl_src[0], false, false);
+      GenSput(vB, rl_src[0], false, false);
       break;
 
     case Instruction::SPUT_WIDE:
-      cg->GenSput(cu, vB, rl_src[0], true, false);
+      GenSput(vB, rl_src[0], true, false);
       break;
 
     case Instruction::INVOKE_STATIC_RANGE:
-      cg->GenInvoke(cu, cg->NewMemCallInfo(cu, bb, mir, kStatic, true));
+      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kStatic, true));
       break;
     case Instruction::INVOKE_STATIC:
-      cg->GenInvoke(cu, cg->NewMemCallInfo(cu, bb, mir, kStatic, false));
+      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kStatic, false));
       break;
 
     case Instruction::INVOKE_DIRECT:
-      cg->GenInvoke(cu, cg->NewMemCallInfo(cu, bb, mir, kDirect, false));
+      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kDirect, false));
       break;
     case Instruction::INVOKE_DIRECT_RANGE:
-      cg->GenInvoke(cu, cg->NewMemCallInfo(cu, bb, mir, kDirect, true));
+      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kDirect, true));
       break;
 
     case Instruction::INVOKE_VIRTUAL:
-      cg->GenInvoke(cu, cg->NewMemCallInfo(cu, bb, mir, kVirtual, false));
+      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kVirtual, false));
       break;
     case Instruction::INVOKE_VIRTUAL_RANGE:
-      cg->GenInvoke(cu, cg->NewMemCallInfo(cu, bb, mir, kVirtual, true));
+      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kVirtual, true));
       break;
 
     case Instruction::INVOKE_SUPER:
-      cg->GenInvoke(cu, cg->NewMemCallInfo(cu, bb, mir, kSuper, false));
+      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kSuper, false));
       break;
     case Instruction::INVOKE_SUPER_RANGE:
-      cg->GenInvoke(cu, cg->NewMemCallInfo(cu, bb, mir, kSuper, true));
+      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kSuper, true));
       break;
 
     case Instruction::INVOKE_INTERFACE:
-      cg->GenInvoke(cu, cg->NewMemCallInfo(cu, bb, mir, kInterface, false));
+      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kInterface, false));
       break;
     case Instruction::INVOKE_INTERFACE_RANGE:
-      cg->GenInvoke(cu, cg->NewMemCallInfo(cu, bb, mir, kInterface, true));
+      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kInterface, true));
       break;
 
     case Instruction::NEG_INT:
     case Instruction::NOT_INT:
-      cg->GenArithOpInt(cu, opcode, rl_dest, rl_src[0], rl_src[0]);
+      GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[0]);
       break;
 
     case Instruction::NEG_LONG:
     case Instruction::NOT_LONG:
-      cg->GenArithOpLong(cu, opcode, rl_dest, rl_src[0], rl_src[0]);
+      GenArithOpLong(opcode, rl_dest, rl_src[0], rl_src[0]);
       break;
 
     case Instruction::NEG_FLOAT:
-      cg->GenArithOpFloat(cu, opcode, rl_dest, rl_src[0], rl_src[0]);
+      GenArithOpFloat(opcode, rl_dest, rl_src[0], rl_src[0]);
       break;
 
     case Instruction::NEG_DOUBLE:
-      cg->GenArithOpDouble(cu, opcode, rl_dest, rl_src[0], rl_src[0]);
+      GenArithOpDouble(opcode, rl_dest, rl_src[0], rl_src[0]);
       break;
 
     case Instruction::INT_TO_LONG:
-      cg->GenIntToLong(cu, rl_dest, rl_src[0]);
+      GenIntToLong(rl_dest, rl_src[0]);
       break;
 
     case Instruction::LONG_TO_INT:
-      rl_src[0] = UpdateLocWide(cu, rl_src[0]);
-      rl_src[0] = WideToNarrow(cu, rl_src[0]);
-      cg->StoreValue(cu, rl_dest, rl_src[0]);
+      rl_src[0] = UpdateLocWide(rl_src[0]);
+      rl_src[0] = WideToNarrow(rl_src[0]);
+      StoreValue(rl_dest, rl_src[0]);
       break;
 
     case Instruction::INT_TO_BYTE:
     case Instruction::INT_TO_SHORT:
     case Instruction::INT_TO_CHAR:
-      cg->GenIntNarrowing(cu, opcode, rl_dest, rl_src[0]);
+      GenIntNarrowing(opcode, rl_dest, rl_src[0]);
       break;
 
     case Instruction::INT_TO_FLOAT:
@@ -526,7 +520,7 @@
     case Instruction::DOUBLE_TO_INT:
     case Instruction::DOUBLE_TO_LONG:
     case Instruction::DOUBLE_TO_FLOAT:
-      cg->GenConversion(cu, opcode, rl_dest, rl_src[0]);
+      GenConversion(opcode, rl_dest, rl_src[0]);
       break;
 
 
@@ -541,15 +535,15 @@
     case Instruction::XOR_INT:
     case Instruction::XOR_INT_2ADDR:
       if (rl_src[0].is_const &&
-          cu->cg->InexpensiveConstantInt(cu->mir_graph->ConstantValue(rl_src[0]))) {
-        cg->GenArithOpIntLit(cu, opcode, rl_dest, rl_src[1],
-                             cu->mir_graph->ConstantValue(rl_src[0].orig_sreg));
+          InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[0]))) {
+        GenArithOpIntLit(opcode, rl_dest, rl_src[1],
+                         mir_graph_->ConstantValue(rl_src[0].orig_sreg));
       } else if (rl_src[1].is_const &&
-          cu->cg->InexpensiveConstantInt(cu->mir_graph->ConstantValue(rl_src[1]))) {
-        cg->GenArithOpIntLit(cu, opcode, rl_dest, rl_src[0],
-                             cu->mir_graph->ConstantValue(rl_src[1].orig_sreg));
+          InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[1]))) {
+        GenArithOpIntLit(opcode, rl_dest, rl_src[0],
+                         mir_graph_->ConstantValue(rl_src[1].orig_sreg));
       } else {
-        cg->GenArithOpInt(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
+        GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[1]);
       }
       break;
 
@@ -566,10 +560,10 @@
     case Instruction::USHR_INT:
     case Instruction::USHR_INT_2ADDR:
       if (rl_src[1].is_const &&
-          cu->cg->InexpensiveConstantInt(cu->mir_graph->ConstantValue(rl_src[1]))) {
-        cg->GenArithOpIntLit(cu, opcode, rl_dest, rl_src[0], cu->mir_graph->ConstantValue(rl_src[1]));
+          InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[1]))) {
+        GenArithOpIntLit(opcode, rl_dest, rl_src[0], mir_graph_->ConstantValue(rl_src[1]));
       } else {
-        cg->GenArithOpInt(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
+        GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[1]);
       }
       break;
 
@@ -584,7 +578,7 @@
     case Instruction::OR_LONG_2ADDR:
     case Instruction::XOR_LONG_2ADDR:
       if (rl_src[0].is_const || rl_src[1].is_const) {
-        cg->GenArithImmOpLong(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
+        GenArithImmOpLong(opcode, rl_dest, rl_src[0], rl_src[1]);
         break;
       }
       // Note: intentional fallthrough.
@@ -595,7 +589,7 @@
     case Instruction::MUL_LONG_2ADDR:
     case Instruction::DIV_LONG_2ADDR:
     case Instruction::REM_LONG_2ADDR:
-      cg->GenArithOpLong(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
+      GenArithOpLong(opcode, rl_dest, rl_src[0], rl_src[1]);
       break;
 
     case Instruction::SHL_LONG:
@@ -605,9 +599,9 @@
     case Instruction::SHR_LONG_2ADDR:
     case Instruction::USHR_LONG_2ADDR:
       if (rl_src[1].is_const) {
-        cg->GenShiftImmOpLong(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
+        GenShiftImmOpLong(opcode, rl_dest, rl_src[0], rl_src[1]);
       } else {
-        cg->GenShiftOpLong(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
+        GenShiftOpLong(opcode, rl_dest, rl_src[0], rl_src[1]);
       }
       break;
 
@@ -621,7 +615,7 @@
     case Instruction::MUL_FLOAT_2ADDR:
     case Instruction::DIV_FLOAT_2ADDR:
     case Instruction::REM_FLOAT_2ADDR:
-      cg->GenArithOpFloat(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
+      GenArithOpFloat(opcode, rl_dest, rl_src[0], rl_src[1]);
       break;
 
     case Instruction::ADD_DOUBLE:
@@ -634,7 +628,7 @@
     case Instruction::MUL_DOUBLE_2ADDR:
     case Instruction::DIV_DOUBLE_2ADDR:
     case Instruction::REM_DOUBLE_2ADDR:
-      cg->GenArithOpDouble(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
+      GenArithOpDouble(opcode, rl_dest, rl_src[0], rl_src[1]);
       break;
 
     case Instruction::RSUB_INT:
@@ -656,7 +650,7 @@
     case Instruction::SHL_INT_LIT8:
     case Instruction::SHR_INT_LIT8:
     case Instruction::USHR_INT_LIT8:
-      cg->GenArithOpIntLit(cu, opcode, rl_dest, rl_src[0], vC);
+      GenArithOpIntLit(opcode, rl_dest, rl_src[0], vC);
       break;
 
     default:
@@ -665,33 +659,32 @@
 }
 
 // Process extended MIR instructions
-static void HandleExtendedMethodMIR(CompilationUnit* cu, BasicBlock* bb, MIR* mir)
+void Mir2Lir::HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir)
 {
-  Codegen* cg = cu->cg.get();
   switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
     case kMirOpCopy: {
-      RegLocation rl_src = GetSrc(cu, mir, 0);
-      RegLocation rl_dest = GetDest(cu, mir);
-      cg->StoreValue(cu, rl_dest, rl_src);
+      RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
+      RegLocation rl_dest = mir_graph_->GetDest(mir);
+      StoreValue(rl_dest, rl_src);
       break;
     }
     case kMirOpFusedCmplFloat:
-      cg->GenFusedFPCmpBranch(cu, bb, mir, false /*gt bias*/, false /*double*/);
+      GenFusedFPCmpBranch(bb, mir, false /*gt bias*/, false /*double*/);
       break;
     case kMirOpFusedCmpgFloat:
-      cg->GenFusedFPCmpBranch(cu, bb, mir, true /*gt bias*/, false /*double*/);
+      GenFusedFPCmpBranch(bb, mir, true /*gt bias*/, false /*double*/);
       break;
     case kMirOpFusedCmplDouble:
-      cg->GenFusedFPCmpBranch(cu, bb, mir, false /*gt bias*/, true /*double*/);
+      GenFusedFPCmpBranch(bb, mir, false /*gt bias*/, true /*double*/);
       break;
     case kMirOpFusedCmpgDouble:
-      cg->GenFusedFPCmpBranch(cu, bb, mir, true /*gt bias*/, true /*double*/);
+      GenFusedFPCmpBranch(bb, mir, true /*gt bias*/, true /*double*/);
       break;
     case kMirOpFusedCmpLong:
-      cg->GenFusedLongCmpBranch(cu, bb, mir);
+      GenFusedLongCmpBranch(bb, mir);
       break;
     case kMirOpSelect:
-      cg->GenSelect(cu, bb, mir);
+      GenSelect(bb, mir);
       break;
     default:
       break;
@@ -699,65 +692,63 @@
 }
 
 // Handle the content in each basic block.
-static bool MethodBlockCodeGen(CompilationUnit* cu, BasicBlock* bb)
+bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb)
 {
   if (bb->block_type == kDead) return false;
-  Codegen* cg = cu->cg.get();
-  cu->current_dalvik_offset = bb->start_offset;
+  current_dalvik_offset_ = bb->start_offset;
   MIR* mir;
-  LIR* label_list = cu->block_label_list;
   int block_id = bb->id;
 
-  label_list[block_id].operands[0] = bb->start_offset;
+  block_label_list_[block_id].operands[0] = bb->start_offset;
 
   // Insert the block label.
-  label_list[block_id].opcode = kPseudoNormalBlockLabel;
-  AppendLIR(cu, &label_list[block_id]);
+  block_label_list_[block_id].opcode = kPseudoNormalBlockLabel;
+  AppendLIR(&block_label_list_[block_id]);
 
   LIR* head_lir = NULL;
 
   // If this is a catch block, export the start address.
   if (bb->catch_entry) {
-    head_lir = NewLIR0(cu, kPseudoExportedPC);
+    head_lir = NewLIR0(kPseudoExportedPC);
   }
 
   // Free temp registers and reset redundant store tracking.
-  ResetRegPool(cu);
-  ResetDefTracking(cu);
+  ResetRegPool();
+  ResetDefTracking();
 
-  ClobberAllRegs(cu);
+  ClobberAllRegs();
 
   if (bb->block_type == kEntryBlock) {
-    int start_vreg = cu->num_dalvik_registers - cu->num_ins;
-    cg->GenEntrySequence(cu, &cu->reg_location[start_vreg],
-                         cu->reg_location[cu->method_sreg]);
+    int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
+    GenEntrySequence(&mir_graph_->reg_location_[start_vreg],
+                     mir_graph_->reg_location_[mir_graph_->GetMethodSReg()]);
   } else if (bb->block_type == kExitBlock) {
-    cg->GenExitSequence(cu);
+    GenExitSequence();
   }
 
   for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
-    ResetRegPool(cu);
-    if (cu->disable_opt & (1 << kTrackLiveTemps)) {
-      ClobberAllRegs(cu);
+    ResetRegPool();
+    if (cu_->disable_opt & (1 << kTrackLiveTemps)) {
+      ClobberAllRegs();
     }
 
-    if (cu->disable_opt & (1 << kSuppressLoads)) {
-      ResetDefTracking(cu);
+    if (cu_->disable_opt & (1 << kSuppressLoads)) {
+      ResetDefTracking();
     }
 
     // Reset temp tracking sanity check.
     if (kIsDebugBuild) {
-      cu->live_sreg = INVALID_SREG;
+      live_sreg_ = INVALID_SREG;
     }
 
-    cu->current_dalvik_offset = mir->offset;
+    current_dalvik_offset_ = mir->offset;
     int opcode = mir->dalvikInsn.opcode;
     LIR* boundary_lir;
 
     // Mark the beginning of a Dalvik instruction for line tracking.
-    char* inst_str = cu->verbose ?
-       GetDalvikDisassembly(cu, mir) : NULL;
-    boundary_lir = MarkBoundary(cu, mir->offset, inst_str);
+    char* inst_str = cu_->verbose ?
+       mir_graph_->GetDalvikDisassembly(mir) : NULL;
+    boundary_lir = MarkBoundary(mir->offset, inst_str);
     // Remember the first LIR for this block.
     if (head_lir == NULL) {
       head_lir = boundary_lir;
@@ -777,35 +768,34 @@
     }
 
     if (opcode >= kMirOpFirst) {
-      HandleExtendedMethodMIR(cu, bb, mir);
+      HandleExtendedMethodMIR(bb, mir);
       continue;
     }
 
-    CompileDalvikInstruction(cu, mir, bb, label_list);
+    CompileDalvikInstruction(mir, bb, block_label_list_);
   }
 
   if (head_lir) {
     // Eliminate redundant loads/stores and delay stores into later slots.
-    ApplyLocalOptimizations(cu, head_lir, cu->last_lir_insn);
+    ApplyLocalOptimizations(head_lir, last_lir_insn_);
 
     // Generate an unconditional branch to the fallthrough block.
     if (bb->fall_through) {
-      cg->OpUnconditionalBranch(cu, &label_list[bb->fall_through->id]);
+      OpUnconditionalBranch(&block_label_list_[bb->fall_through->id]);
     }
   }
   return false;
 }
 
-void SpecialMIR2LIR(CompilationUnit* cu, SpecialCaseHandler special_case)
+void Mir2Lir::SpecialMIR2LIR(SpecialCaseHandler special_case)
 {
-  Codegen* cg = cu->cg.get();
   // Find the first DalvikByteCode block.
-  int num_reachable_blocks = cu->mir_graph->GetNumReachableBlocks();
+  int num_reachable_blocks = mir_graph_->GetNumReachableBlocks();
   BasicBlock* bb = NULL;
   for (int idx = 0; idx < num_reachable_blocks; idx++) {
     // TODO: no direct access of growable lists.
-    int dfs_index = cu->mir_graph->GetDfsOrder()->elem_list[idx];
-    bb = cu->mir_graph->GetBasicBlock(dfs_index);
+    int dfs_index = mir_graph_->GetDfsOrder()->elem_list[idx];
+    bb = mir_graph_->GetBasicBlock(dfs_index);
     if (bb->block_type == kDalvikByteCode) {
       break;
     }
@@ -820,33 +810,32 @@
   MIR* mir = bb->first_mir_insn;
 
   // Free temp registers and reset redundant store tracking.
-  ResetRegPool(cu);
-  ResetDefTracking(cu);
-  ClobberAllRegs(cu);
+  ResetRegPool();
+  ResetDefTracking();
+  ClobberAllRegs();
 
-  cg->GenSpecialCase(cu, bb, mir, special_case);
+  GenSpecialCase(bb, mir, special_case);
 }
 
-void MethodMIR2LIR(CompilationUnit* cu)
+void Mir2Lir::MethodMIR2LIR()
 {
-  Codegen* cg = cu->cg.get();
   // Hold the labels of each block.
-  cu->block_label_list =
-      static_cast<LIR*>(NewMem(cu, sizeof(LIR) * cu->mir_graph->GetNumBlocks(), true, kAllocLIR));
+  block_label_list_ =
+      static_cast<LIR*>(NewMem(cu_, sizeof(LIR) * mir_graph_->GetNumBlocks(), true, kAllocLIR));
 
-  PreOrderDfsIterator iter(cu->mir_graph.get(), false /* not iterative */);
+  PreOrderDfsIterator iter(mir_graph_, false /* not iterative */);
   for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
-    MethodBlockCodeGen(cu, bb);
+    MethodBlockCodeGen(bb);
   }
 
-  cg->HandleSuspendLaunchPads(cu);
+  HandleSuspendLaunchPads();
 
-  cg->HandleThrowLaunchPads(cu);
+  HandleThrowLaunchPads();
 
-  cg->HandleIntrinsicLaunchPads(cu);
+  HandleIntrinsicLaunchPads();
 
-  if (!(cu->disable_opt & (1 << kSafeOptimizations))) {
-    RemoveRedundantBranches(cu);
+  if (!(cu_->disable_opt & (1 << kSafeOptimizations))) {
+    RemoveRedundantBranches();
   }
 }
 
diff --git a/src/compiler/dex/quick/mir_to_lir.h b/src/compiler/dex/quick/mir_to_lir.h
index b2767ba..d2d56f7 100644
--- a/src/compiler/dex/quick/mir_to_lir.h
+++ b/src/compiler/dex/quick/mir_to_lir.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2012 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,14 +14,738 @@
  * limitations under the License.
  */
 
-#ifndef ART_SRC_COMPILER_DEX_QUICK_MIRTOLIR_H_
-#define ART_SRC_COMPILER_DEX_QUICK_MIRTOLIR_H_
+#ifndef ART_SRC_COMPILER_DEX_QUICK_CODEGEN_H_
+#define ART_SRC_COMPILER_DEX_QUICK_CODEGEN_H_
+
+#include "invoke_type.h"
+#include "compiled_method.h"
+#include "compiler/dex/compiler_enums.h"
+#include "compiler/dex/compiler_ir.h"
+#include "compiler/dex/compiler_utility.h"
+#include "compiler/dex/backend.h"
+#include "safe_map.h"
 
 namespace art {
-void SpecialMIR2LIR(CompilationUnit* cu, SpecialCaseHandler special_case);
-void MethodMIR2LIR(CompilationUnit* cu);
 
+// Set to 1 to measure cost of suspend check.
+#define NO_SUSPEND 0
+
+#define IS_BINARY_OP         (1ULL << kIsBinaryOp)
+#define IS_BRANCH            (1ULL << kIsBranch)
+#define IS_IT                (1ULL << kIsIT)
+#define IS_LOAD              (1ULL << kMemLoad)
+#define IS_QUAD_OP           (1ULL << kIsQuadOp)
+#define IS_QUIN_OP           (1ULL << kIsQuinOp)
+#define IS_SEXTUPLE_OP       (1ULL << kIsSextupleOp)
+#define IS_STORE             (1ULL << kMemStore)
+#define IS_TERTIARY_OP       (1ULL << kIsTertiaryOp)
+#define IS_UNARY_OP          (1ULL << kIsUnaryOp)
+#define NEEDS_FIXUP          (1ULL << kPCRelFixup)
+#define NO_OPERAND           (1ULL << kNoOperand)
+#define REG_DEF0             (1ULL << kRegDef0)
+#define REG_DEF1             (1ULL << kRegDef1)
+#define REG_DEFA             (1ULL << kRegDefA)
+#define REG_DEFD             (1ULL << kRegDefD)
+#define REG_DEF_FPCS_LIST0   (1ULL << kRegDefFPCSList0)
+#define REG_DEF_FPCS_LIST2   (1ULL << kRegDefFPCSList2)
+#define REG_DEF_LIST0        (1ULL << kRegDefList0)
+#define REG_DEF_LIST1        (1ULL << kRegDefList1)
+#define REG_DEF_LR           (1ULL << kRegDefLR)
+#define REG_DEF_SP           (1ULL << kRegDefSP)
+#define REG_USE0             (1ULL << kRegUse0)
+#define REG_USE1             (1ULL << kRegUse1)
+#define REG_USE2             (1ULL << kRegUse2)
+#define REG_USE3             (1ULL << kRegUse3)
+#define REG_USE4             (1ULL << kRegUse4)
+#define REG_USEA             (1ULL << kRegUseA)
+#define REG_USEC             (1ULL << kRegUseC)
+#define REG_USED             (1ULL << kRegUseD)
+#define REG_USE_FPCS_LIST0   (1ULL << kRegUseFPCSList0)
+#define REG_USE_FPCS_LIST2   (1ULL << kRegUseFPCSList2)
+#define REG_USE_LIST0        (1ULL << kRegUseList0)
+#define REG_USE_LIST1        (1ULL << kRegUseList1)
+#define REG_USE_LR           (1ULL << kRegUseLR)
+#define REG_USE_PC           (1ULL << kRegUsePC)
+#define REG_USE_SP           (1ULL << kRegUseSP)
+#define SETS_CCODES          (1ULL << kSetsCCodes)
+#define USES_CCODES          (1ULL << kUsesCCodes)
+
+// Common combo register usage patterns.
+#define REG_DEF01            (REG_DEF0 | REG_DEF1)
+#define REG_DEF01_USE2       (REG_DEF0 | REG_DEF1 | REG_USE2)
+#define REG_DEF0_USE01       (REG_DEF0 | REG_USE01)
+#define REG_DEF0_USE0        (REG_DEF0 | REG_USE0)
+#define REG_DEF0_USE12       (REG_DEF0 | REG_USE12)
+#define REG_DEF0_USE1        (REG_DEF0 | REG_USE1)
+#define REG_DEF0_USE2        (REG_DEF0 | REG_USE2)
+#define REG_DEFAD_USEAD      (REG_DEFAD_USEA | REG_USED)
+#define REG_DEFAD_USEA       (REG_DEFA_USEA | REG_DEFD)
+#define REG_DEFA_USEA        (REG_DEFA | REG_USEA)
+#define REG_USE012           (REG_USE01 | REG_USE2)
+#define REG_USE014           (REG_USE01 | REG_USE4)
+#define REG_USE01            (REG_USE0 | REG_USE1)
+#define REG_USE02            (REG_USE0 | REG_USE2)
+#define REG_USE12            (REG_USE1 | REG_USE2)
+#define REG_USE23            (REG_USE2 | REG_USE3)
+
+struct BasicBlock;
+struct CallInfo;
+struct CompilationUnit;
+struct MIR;
+struct RegLocation;
+struct RegisterInfo;
+class MIRGraph;
+class Mir2Lir;
+
+typedef int (*NextCallInsn)(CompilationUnit*, CallInfo*, int, uint32_t dex_idx,
+                            uint32_t method_idx, uintptr_t direct_code,
+                            uintptr_t direct_method, InvokeType type);
+
+typedef std::vector<uint8_t> CodeBuffer;
+
+struct LIR {
+  int offset;               // Offset of this instruction.
+  int dalvik_offset;        // Offset of Dalvik opcode.
+  LIR* next;
+  LIR* prev;
+  LIR* target;
+  int opcode;
+  int operands[5];          // [0..4] = [dest, src1, src2, extra, extra2].
+  struct {
+    bool is_nop:1;          // LIR is optimized away.
+    bool pcRelFixup:1;      // May need pc-relative fixup.
+    unsigned int size:5;    // Note: size is in bytes.
+    unsigned int unused:25;
+  } flags;
+  int alias_info;           // For Dalvik register & litpool disambiguation.
+  uint64_t use_mask;        // Resource mask for use.
+  uint64_t def_mask;        // Resource mask for def.
+};
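
For orientation, a minimal sketch of how these nodes chain together (illustrative
only; AppendLIR() and InsertLIRBefore() below maintain the links plus the
first_lir_insn_/last_lir_insn_ bookkeeping):

    // Two zero-initialized nodes hand-linked into a tiny list.
    LIR label = {};              // e.g. a block label placeholder
    LIR insn = {};
    label.next = &insn;          // forward link
    insn.prev = &label;          // back link
    insn.flags.is_nop = true;    // marks the LIR as optimized away (see flags)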
+
+// Target-specific initialization.
+Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph);
+Mir2Lir* MipsCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph);
+Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph);
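
These three factories are the only instruction-set-specific entry points into the
backend; a caller can dispatch over them with something like the sketch below
(the Isa enum and SelectCodeGenerator are illustrative, not part of this change):

    enum class Isa { kArm, kMips, kX86 };

    Mir2Lir* SelectCodeGenerator(Isa isa, CompilationUnit* cu, MIRGraph* mir_graph) {
      switch (isa) {
        case Isa::kArm:  return ArmCodeGenerator(cu, mir_graph);
        case Isa::kMips: return MipsCodeGenerator(cu, mir_graph);
        case Isa::kX86:  return X86CodeGenerator(cu, mir_graph);
      }
      return nullptr;  // unreachable for the enumerators above
    }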
+
+// Utility macros to traverse the LIR list.
+#define NEXT_LIR(lir) (lir->next)
+#define PREV_LIR(lir) (lir->prev)
+
+// Defines for alias_info (tracks Dalvik register references).
+#define DECODE_ALIAS_INFO_REG(X)        (X & 0xffff)
+#define DECODE_ALIAS_INFO_WIDE_FLAG     (0x80000000)
+#define DECODE_ALIAS_INFO_WIDE(X)       ((X & DECODE_ALIAS_INFO_WIDE_FLAG) ? 1 : 0)
+#define ENCODE_ALIAS_INFO(REG, ISWIDE)  (REG | (ISWIDE ? DECODE_ALIAS_INFO_WIDE_FLAG : 0))
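
A round trip through this encoding, with illustrative values:

    uint32_t info = ENCODE_ALIAS_INFO(5, true);   // Dalvik v5, wide: 0x80000005
    int reg  = DECODE_ALIAS_INFO_REG(info);       // == 5
    int wide = DECODE_ALIAS_INFO_WIDE(info);      // == 1 (wide flag set)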
+
+// Common resource macros.
+#define ENCODE_CCODE            (1ULL << kCCode)
+#define ENCODE_FP_STATUS        (1ULL << kFPStatus)
+
+// Abstract memory locations.
+#define ENCODE_DALVIK_REG       (1ULL << kDalvikReg)
+#define ENCODE_LITERAL          (1ULL << kLiteral)
+#define ENCODE_HEAP_REF         (1ULL << kHeapRef)
+#define ENCODE_MUST_NOT_ALIAS   (1ULL << kMustNotAlias)
+
+#define ENCODE_ALL              (~0ULL)
+#define ENCODE_MEM              (ENCODE_DALVIK_REG | ENCODE_LITERAL | \
+                                 ENCODE_HEAP_REF | ENCODE_MUST_NOT_ALIAS)
+//TODO: replace these macros
+#define SLOW_FIELD_PATH (cu_->enable_debug & (1 << kDebugSlowFieldPath))
+#define SLOW_INVOKE_PATH (cu_->enable_debug & (1 << kDebugSlowInvokePath))
+#define SLOW_STRING_PATH (cu_->enable_debug & (1 << kDebugSlowStringPath))
+#define SLOW_TYPE_PATH (cu_->enable_debug & (1 << kDebugSlowTypePath))
+#define EXERCISE_SLOWEST_STRING_PATH (cu_->enable_debug & (1 << kDebugSlowestStringPath))
+#define is_pseudo_opcode(opcode) (static_cast<int>(opcode) < 0)
+
+class Mir2Lir : public Backend {
+
+  public:
+    struct SwitchTable {
+      int offset;
+      const uint16_t* table;      // Original dex table.
+      int vaddr;                  // Dalvik offset of switch opcode.
+      LIR* anchor;                // Reference instruction for relative offsets.
+      LIR** targets;              // Array of case targets.
+    };
+
+    struct FillArrayData {
+      int offset;
+      const uint16_t* table;      // Original dex table.
+      int size;
+      int vaddr;                  // Dalvik offset of FILL_ARRAY_DATA opcode.
+    };
+
+    /* Static register use counts */
+    struct RefCounts {
+      int count;
+      int s_reg;
+      bool double_start;   // Whether this v_reg starts a double (wide) pair.
+    };
+
+    /*
+     * Data structure tracking the mapping between a Dalvik register (pair) and a
+     * native register (pair). The idea is to reuse the previously loaded value
+     * if possible, otherwise to keep the value in a native register as long as
+     * possible.
+     */
+    struct RegisterInfo {
+      int reg;                    // Reg number
+      bool in_use;                // Has it been allocated?
+      bool is_temp;               // Can allocate as temp?
+      bool pair;                  // Part of a register pair?
+      int partner;                // If pair, other reg of pair.
+      bool live;                  // Is there an associated SSA name?
+      bool dirty;                 // If live, is it dirty?
+      int s_reg;                  // Name of live value.
+      LIR *def_start;             // Starting inst in last def sequence.
+      LIR *def_end;               // Ending inst in last def sequence.
+    };
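
A concrete instance of the reuse idea described above (the field values are
illustrative):

    // Suppose s_reg 7 was loaded into physical r4 earlier in the block.
    Mir2Lir::RegisterInfo r4 = {};
    r4.reg = 4;
    r4.is_temp = true;
    r4.live = true;       // r4 currently holds the value named by s_reg 7
    r4.s_reg = 7;
    r4.dirty = false;     // matches memory, no writeback needed
    // A later read of s_reg 7 can reuse r4 (see AllocLive() in ralloc_util.cc)
    // instead of reloading the value from the Dalvik frame.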
+
+    struct RegisterPool {
+      int num_core_regs;
+      RegisterInfo *core_regs;
+      int next_core_reg;
+      int num_fp_regs;
+      RegisterInfo *FPRegs;
+      int next_fp_reg;
+    };
+
+    struct PromotionMap {
+      RegLocationType core_location:3;
+      uint8_t core_reg;
+      RegLocationType fp_location:3;
+      uint8_t FpReg;
+      bool first_in_pair;
+    };
+
+    virtual ~Mir2Lir() {};
+
+    int32_t s4FromSwitchData(const void* switch_data) {
+      return *reinterpret_cast<const int32_t*>(switch_data);
+    }
+
+    RegisterClass oat_reg_class_by_size(OpSize size) {
+      return (size == kUnsignedHalf || size == kSignedHalf || size == kUnsignedByte ||
+              size == kSignedByte) ? kCoreReg : kAnyReg;
+    }
+
+    size_t CodeBufferSizeInBytes() {
+      return code_buffer_.size() * sizeof(code_buffer_[0]);  // Element count times element size.
+    }
+
+    // Shared by all targets - implemented in codegen_util.cc
+    void AppendLIR(LIR* lir);
+    void InsertLIRBefore(LIR* current_lir, LIR* new_lir);
+    void InsertLIRAfter(LIR* current_lir, LIR* new_lir);
+
+    int ComputeFrameSize();
+    virtual void Materialize();
+    virtual CompiledMethod* GetCompiledMethod();
+    void MarkSafepointPC(LIR* inst);
+    bool FastInstance(uint32_t field_idx, int& field_offset, bool& is_volatile, bool is_put);
+    void SetupResourceMasks(LIR* lir);
+    void AssembleLIR();
+    void SetMemRefType(LIR* lir, bool is_load, int mem_type);
+    void AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, bool is64bit);
+    void SetupRegMask(uint64_t* mask, int reg);
+    void DumpLIRInsn(LIR* arg, unsigned char* base_addr);
+    void DumpPromotionMap();
+    void CodegenDump();
+    LIR* RawLIR(int dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
+                int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = NULL);
+    LIR* NewLIR0(int opcode);
+    LIR* NewLIR1(int opcode, int dest);
+    LIR* NewLIR2(int opcode, int dest, int src1);
+    LIR* NewLIR3(int opcode, int dest, int src1, int src2);
+    LIR* NewLIR4(int opcode, int dest, int src1, int src2, int info);
+    LIR* NewLIR5(int opcode, int dest, int src1, int src2, int info1, int info2);
+    LIR* ScanLiteralPool(LIR* data_target, int value, unsigned int delta);
+    LIR* ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi);
+    LIR* AddWordData(LIR** constant_list_p, int value);
+    LIR* AddWideData(LIR** constant_list_p, int val_lo, int val_hi);
+    void ProcessSwitchTables();
+    void DumpSparseSwitchTable(const uint16_t* table);
+    void DumpPackedSwitchTable(const uint16_t* table);
+    LIR* MarkBoundary(int offset, const char* inst_str);
+    void NopLIR(LIR* lir);
+    bool EvaluateBranch(Instruction::Code opcode, int src1, int src2);
+    bool IsInexpensiveConstant(RegLocation rl_src);
+    ConditionCode FlipComparisonOrder(ConditionCode before);
+    void DumpMappingTable(const char* table_name, const std::string& descriptor,
+                          const std::string& name, const std::string& signature,
+                          const std::vector<uint32_t>& v);
+    void InstallLiteralPools();
+    void InstallSwitchTables();
+    void InstallFillArrayData();
+    bool VerifyCatchEntries();
+    void CreateMappingTables();
+    void CreateNativeGcMap();
+    int AssignLiteralOffset(int offset);
+    int AssignSwitchTablesOffset(int offset);
+    int AssignFillArrayDataOffset(int offset);
+    int AssignInsnOffsets();
+    void AssignOffsets();
+    LIR* InsertCaseLabel(int vaddr, int keyVal);
+    void MarkPackedCaseLabels(Mir2Lir::SwitchTable *tab_rec);
+    void MarkSparseCaseLabels(Mir2Lir::SwitchTable *tab_rec);
+
+    // Shared by all targets - implemented in local_optimizations.cc
+    void ConvertMemOpIntoMove(LIR* orig_lir, int dest, int src);
+    void ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir);
+    void ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir);
+    void ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir);
+    void RemoveRedundantBranches();
+
+    // Shared by all targets - implemented in ralloc_util.cc
+    int GetSRegHi(int lowSreg);
+    bool oat_live_out(int s_reg);
+    int oatSSASrc(MIR* mir, int num);
+    void SimpleRegAlloc();
+    void ResetRegPool();
+    void CompilerInitPool(RegisterInfo* regs, int* reg_nums, int num);
+    void DumpRegPool(RegisterInfo* p, int num_regs);
+    void DumpCoreRegPool();
+    void DumpFpRegPool();
+    void ClobberBody(RegisterInfo* p);
+    void Clobber(int reg);
+    void ClobberSRegBody(RegisterInfo* p, int num_regs, int s_reg);
+    void ClobberSReg(int s_reg);
+    int SRegToPMap(int s_reg);
+    void RecordCorePromotion(int reg, int s_reg);
+    int AllocPreservedCoreReg(int s_reg);
+    void RecordFpPromotion(int reg, int s_reg);
+    int AllocPreservedSingle(int s_reg, bool even);
+    int AllocPreservedDouble(int s_reg);
+    int AllocPreservedFPReg(int s_reg, bool double_start);
+    int AllocTempBody(RegisterInfo* p, int num_regs, int* next_temp,
+                      bool required);
+    int AllocTempDouble();
+    int AllocFreeTemp();
+    int AllocTemp();
+    int AllocTempFloat();
+    RegisterInfo* AllocLiveBody(RegisterInfo* p, int num_regs, int s_reg);
+    RegisterInfo* AllocLive(int s_reg, int reg_class);
+    void FreeTemp(int reg);
+    RegisterInfo* IsLive(int reg);
+    RegisterInfo* IsTemp(int reg);
+    RegisterInfo* IsPromoted(int reg);
+    bool IsDirty(int reg);
+    void LockTemp(int reg);
+    void ResetDefBody(RegisterInfo* p);
+    void ResetDef(int reg);
+    void NullifyRange(LIR *start, LIR *finish, int s_reg1, int s_reg2);
+    void MarkDef(RegLocation rl, LIR *start, LIR *finish);
+    void MarkDefWide(RegLocation rl, LIR *start, LIR *finish);
+    RegLocation WideToNarrow(RegLocation rl);
+    void ResetDefLoc(RegLocation rl);
+    void ResetDefLocWide(RegLocation rl);
+    void ResetDefTracking();
+    void ClobberAllRegs();
+    void FlushAllRegsBody(RegisterInfo* info, int num_regs);
+    void FlushAllRegs();
+    bool RegClassMatches(int reg_class, int reg);
+    void MarkLive(int reg, int s_reg);
+    void MarkTemp(int reg);
+    void UnmarkTemp(int reg);
+    void MarkPair(int low_reg, int high_reg);
+    void MarkClean(RegLocation loc);
+    void MarkDirty(RegLocation loc);
+    void MarkInUse(int reg);
+    void CopyRegInfo(int new_reg, int old_reg);
+    bool CheckCorePoolSanity();
+    RegLocation UpdateLoc(RegLocation loc);
+    RegLocation UpdateLocWide(RegLocation loc);
+    RegLocation UpdateRawLoc(RegLocation loc);
+    RegLocation EvalLocWide(RegLocation loc, int reg_class, bool update);
+    RegLocation EvalLoc(RegLocation loc, int reg_class, bool update);
+    void CountRefs(BasicBlock* bb, RefCounts* core_counts,
+                   RefCounts* fp_counts);
+    void DumpCounts(const RefCounts* arr, int size, const char* msg);
+    void DoPromotion();
+    int VRegOffset(int v_reg);
+    int SRegOffset(int s_reg);
+    RegLocation GetReturnWide(bool is_double);
+    RegLocation GetReturn(bool is_float);
+
+    // Shared by all targets - implemented in gen_common.cc.
+    bool HandleEasyDivide(Instruction::Code dalvik_opcode,
+                          RegLocation rl_src, RegLocation rl_dest, int lit);
+    bool HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit);
+    void HandleSuspendLaunchPads();
+    void HandleIntrinsicLaunchPads();
+    void HandleThrowLaunchPads();
+    void GenBarrier();
+    LIR* GenCheck(ConditionCode c_code, ThrowKind kind);
+    LIR* GenImmedCheck(ConditionCode c_code, int reg, int imm_val,
+                       ThrowKind kind);
+    LIR* GenNullCheck(int s_reg, int m_reg, int opt_flags);
+    LIR* GenRegRegCheck(ConditionCode c_code, int reg1, int reg2,
+                        ThrowKind kind);
+    void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
+                             RegLocation rl_src2, LIR* taken, LIR* fall_through);
+    void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src,
+                                 LIR* taken, LIR* fall_through);
+    void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
+    void GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
+                         RegLocation rl_src);
+    void GenNewArray(uint32_t type_idx, RegLocation rl_dest,
+                     RegLocation rl_src);
+    void GenFilledNewArray(CallInfo* info);
+    void GenSput(uint32_t field_idx, RegLocation rl_src,
+                 bool is_long_or_double, bool is_object);
+    void GenSget(uint32_t field_idx, RegLocation rl_dest,
+                 bool is_long_or_double, bool is_object);
+    void GenShowTarget();
+    void GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
+                 RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double, bool is_object);
+    void GenIPut(uint32_t field_idx, int opt_flags, OpSize size,
+                 RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double, bool is_object);
+    void GenConstClass(uint32_t type_idx, RegLocation rl_dest);
+    void GenConstString(uint32_t string_idx, RegLocation rl_dest);
+    void GenNewInstance(uint32_t type_idx, RegLocation rl_dest);
+    void GenThrow(RegLocation rl_src);
+    void GenInstanceof(uint32_t type_idx, RegLocation rl_dest,
+                       RegLocation rl_src);
+    void GenCheckCast(uint32_t type_idx, RegLocation rl_src);
+    void GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
+                      RegLocation rl_src1, RegLocation rl_src2);
+    void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                        RegLocation rl_src1, RegLocation rl_shift);
+    void GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
+                       RegLocation rl_src1, RegLocation rl_src2);
+    void GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest,
+                          RegLocation rl_src, int lit);
+    void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                        RegLocation rl_src1, RegLocation rl_src2);
+    void GenConversionCall(int func_offset, RegLocation rl_dest,
+                           RegLocation rl_src);
+    void GenSuspendTest(int opt_flags);
+    void GenSuspendTestAndBranch(int opt_flags, LIR* target);
+
+    // Shared by all targets - implemented in gen_invoke.cc.
+    int CallHelperSetup(int helper_offset);
+    LIR* CallHelper(int r_tgt, int helper_offset, bool safepoint_pc);
+    void CallRuntimeHelperImm(int helper_offset, int arg0, bool safepoint_pc);
+    void CallRuntimeHelperReg(int helper_offset, int arg0, bool safepoint_pc);
+    void CallRuntimeHelperRegLocation(int helper_offset, RegLocation arg0,
+                                       bool safepoint_pc);
+    void CallRuntimeHelperImmImm(int helper_offset, int arg0, int arg1,
+                                 bool safepoint_pc);
+    void CallRuntimeHelperImmRegLocation(int helper_offset, int arg0,
+                                         RegLocation arg1, bool safepoint_pc);
+    void CallRuntimeHelperRegLocationImm(int helper_offset, RegLocation arg0,
+                                         int arg1, bool safepoint_pc);
+    void CallRuntimeHelperImmReg(int helper_offset, int arg0, int arg1,
+                                 bool safepoint_pc);
+    void CallRuntimeHelperRegImm(int helper_offset, int arg0, int arg1,
+                                 bool safepoint_pc);
+    void CallRuntimeHelperImmMethod(int helper_offset, int arg0,
+                                    bool safepoint_pc);
+    void CallRuntimeHelperRegLocationRegLocation(int helper_offset,
+                                                 RegLocation arg0, RegLocation arg1,
+                                                 bool safepoint_pc);
+    void CallRuntimeHelperRegReg(int helper_offset, int arg0, int arg1,
+                                 bool safepoint_pc);
+    void CallRuntimeHelperRegRegImm(int helper_offset, int arg0, int arg1,
+                                    int arg2, bool safepoint_pc);
+    void CallRuntimeHelperImmMethodRegLocation(int helper_offset, int arg0,
+                                               RegLocation arg2, bool safepoint_pc);
+    void CallRuntimeHelperImmMethodImm(int helper_offset, int arg0, int arg2,
+                                       bool safepoint_pc);
+    void CallRuntimeHelperImmRegLocationRegLocation(int helper_offset,
+                                                    int arg0, RegLocation arg1, RegLocation arg2,
+                                                    bool safepoint_pc);
+    void GenInvoke(CallInfo* info);
+    void FlushIns(RegLocation* ArgLocs, RegLocation rl_method);
+    int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
+                             NextCallInsn next_call_insn, uint32_t dex_idx, uint32_t method_idx,
+                             uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
+                             bool skip_this);
+    int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel,
+                           NextCallInsn next_call_insn, uint32_t dex_idx, uint32_t method_idx,
+                           uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
+                           bool skip_this);
+    RegLocation InlineTarget(CallInfo* info);
+    RegLocation InlineTargetWide(CallInfo* info);
+
+    bool GenInlinedCharAt(CallInfo* info);
+    bool GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty);
+    bool GenInlinedAbsInt(CallInfo* info);
+    bool GenInlinedAbsLong(CallInfo* info);
+    bool GenInlinedFloatCvt(CallInfo* info);
+    bool GenInlinedDoubleCvt(CallInfo* info);
+    bool GenInlinedIndexOf(CallInfo* info, bool zero_based);
+    bool GenInlinedStringCompareTo(CallInfo* info);
+    bool GenInlinedCurrentThread(CallInfo* info);
+    bool GenInlinedUnsafeGet(CallInfo* info, bool is_long, bool is_volatile);
+    bool GenInlinedUnsafePut(CallInfo* info, bool is_long, bool is_object,
+                             bool is_volatile, bool is_ordered);
+    bool GenIntrinsic(CallInfo* info);
+    int LoadArgRegs(CallInfo* info, int call_state,
+                    NextCallInsn next_call_insn, uint32_t dex_idx, uint32_t method_idx,
+                    uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
+                    bool skip_this);
+
+    // Shared by all targets - implemented in gen_loadstore.cc.
+    RegLocation LoadCurrMethod();
+    void LoadCurrMethodDirect(int r_tgt);
+    LIR* LoadConstant(int r_dest, int value);
+    LIR* LoadWordDisp(int rBase, int displacement, int r_dest);
+    RegLocation LoadValue(RegLocation rl_src, RegisterClass op_kind);
+    RegLocation LoadValueWide(RegLocation rl_src, RegisterClass op_kind);
+    void LoadValueDirect(RegLocation rl_src, int r_dest);
+    void LoadValueDirectFixed(RegLocation rl_src, int r_dest);
+    void LoadValueDirectWide(RegLocation rl_src, int reg_lo, int reg_hi);
+    void LoadValueDirectWideFixed(RegLocation rl_src, int reg_lo, int reg_hi);
+    LIR* StoreWordDisp(int rBase, int displacement, int r_src);
+    void StoreValue(RegLocation rl_dest, RegLocation rl_src);
+    void StoreValueWide(RegLocation rl_dest, RegLocation rl_src);
+
+    // Shared by all targets - implemented in mir_to_lir.cc.
+    void CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list);
+    void HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir);
+    bool MethodBlockCodeGen(BasicBlock* bb);
+    void SpecialMIR2LIR(SpecialCaseHandler special_case);
+    void MethodMIR2LIR();
+
+    // Required for target - codegen helpers.
+    virtual bool SmallLiteralDivide(Instruction::Code dalvik_opcode,
+                                    RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
+    virtual int LoadHelper(int offset) = 0;
+    virtual LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg) = 0;
+    virtual LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
+                                  int s_reg) = 0;
+    virtual LIR* LoadBaseIndexed(int rBase, int r_index, int r_dest, int scale, OpSize size) = 0;
+    virtual LIR* LoadBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
+                                     int r_dest, int r_dest_hi, OpSize size, int s_reg) = 0;
+    virtual LIR* LoadConstantNoClobber(int r_dest, int value) = 0;
+    virtual LIR* LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) = 0;
+    virtual LIR* StoreBaseDisp(int rBase, int displacement, int r_src, OpSize size) = 0;
+    virtual LIR* StoreBaseDispWide(int rBase, int displacement, int r_src_lo, int r_src_hi) = 0;
+    virtual LIR* StoreBaseIndexed(int rBase, int r_index, int r_src, int scale, OpSize size) = 0;
+    virtual LIR* StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
+                                      int r_src, int r_src_hi, OpSize size, int s_reg) = 0;
+    virtual void MarkGCCard(int val_reg, int tgt_addr_reg) = 0;
+
+    // Required for target - register utilities.
+    virtual bool IsFpReg(int reg) = 0;
+    virtual bool SameRegType(int reg1, int reg2) = 0;
+    virtual int AllocTypedTemp(bool fp_hint, int reg_class) = 0;
+    virtual int AllocTypedTempPair(bool fp_hint, int reg_class) = 0;
+    virtual int S2d(int low_reg, int high_reg) = 0;
+    virtual int TargetReg(SpecialTargetRegister reg) = 0;
+    virtual RegisterInfo* GetRegInfo(int reg) = 0;
+    virtual RegLocation GetReturnAlt() = 0;
+    virtual RegLocation GetReturnWideAlt() = 0;
+    virtual RegLocation LocCReturn() = 0;
+    virtual RegLocation LocCReturnDouble() = 0;
+    virtual RegLocation LocCReturnFloat() = 0;
+    virtual RegLocation LocCReturnWide() = 0;
+    virtual uint32_t FpRegMask() = 0;
+    virtual uint64_t GetRegMaskCommon(int reg) = 0;
+    virtual void AdjustSpillMask() = 0;
+    virtual void ClobberCalleeSave() = 0;
+    virtual void FlushReg(int reg) = 0;
+    virtual void FlushRegWide(int reg1, int reg2) = 0;
+    virtual void FreeCallTemps() = 0;
+    virtual void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) = 0;
+    virtual void LockCallTemps() = 0;
+    virtual void MarkPreservedSingle(int v_reg, int reg) = 0;
+    virtual void CompilerInitializeRegAlloc() = 0;
+
+    // Required for target - miscellaneous.
+    virtual AssemblerStatus AssembleInstructions(uintptr_t start_addr) = 0;
+    virtual void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix) = 0;
+    virtual void SetupTargetResourceMasks(LIR* lir) = 0;
+    virtual const char* GetTargetInstFmt(int opcode) = 0;
+    virtual const char* GetTargetInstName(int opcode) = 0;
+    virtual std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) = 0;
+    virtual uint64_t GetPCUseDefEncoding() = 0;
+    virtual uint64_t GetTargetInstFlags(int opcode) = 0;
+    virtual int GetInsnSize(LIR* lir) = 0;
+    virtual bool IsUnconditionalBranch(LIR* lir) = 0;
+
+    // Required for target - Dalvik-level generators.
+    virtual void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                                   RegLocation rl_src1, RegLocation rl_src2) = 0;
+    virtual void GenMulLong(RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2) = 0;
+    virtual void GenAddLong(RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2) = 0;
+    virtual void GenAndLong(RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2) = 0;
+    virtual void GenArithOpDouble(Instruction::Code opcode,
+                                  RegLocation rl_dest, RegLocation rl_src1,
+                                  RegLocation rl_src2) = 0;
+    virtual void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
+                                 RegLocation rl_src1, RegLocation rl_src2) = 0;
+    virtual void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
+                          RegLocation rl_src1, RegLocation rl_src2) = 0;
+    virtual void GenConversion(Instruction::Code opcode, RegLocation rl_dest,
+                               RegLocation rl_src) = 0;
+    virtual bool GenInlinedCas32(CallInfo* info, bool need_write_barrier) = 0;
+    virtual bool GenInlinedMinMaxInt(CallInfo* info, bool is_min) = 0;
+    virtual bool GenInlinedSqrt(CallInfo* info) = 0;
+    virtual void GenNegLong(RegLocation rl_dest, RegLocation rl_src) = 0;
+    virtual void GenOrLong(RegLocation rl_dest, RegLocation rl_src1,
+                           RegLocation rl_src2) = 0;
+    virtual void GenSubLong(RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2) = 0;
+    virtual void GenXorLong(RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2) = 0;
+    virtual LIR* GenRegMemCheck(ConditionCode c_code, int reg1, int base,
+                                int offset, ThrowKind kind) = 0;
+    virtual RegLocation GenDivRem(RegLocation rl_dest, int reg_lo, int reg_hi,
+                                  bool is_div) = 0;
+    virtual RegLocation GenDivRemLit(RegLocation rl_dest, int reg_lo, int lit,
+                                     bool is_div) = 0;
+    virtual void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2) = 0;
+    virtual void GenDivZeroCheck(int reg_lo, int reg_hi) = 0;
+    virtual void GenEntrySequence(RegLocation* ArgLocs,
+                                  RegLocation rl_method) = 0;
+    virtual void GenExitSequence() = 0;
+    virtual void GenFillArrayData(uint32_t table_offset,
+                                  RegLocation rl_src) = 0;
+    virtual void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
+                                     bool is_double) = 0;
+    virtual void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) = 0;
+    virtual void GenSelect(BasicBlock* bb, MIR* mir) = 0;
+    virtual void GenMemBarrier(MemBarrierKind barrier_kind) = 0;
+    virtual void GenMonitorEnter(int opt_flags, RegLocation rl_src) = 0;
+    virtual void GenMonitorExit(int opt_flags, RegLocation rl_src) = 0;
+    virtual void GenMoveException(RegLocation rl_dest) = 0;
+    virtual void GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
+                                               RegLocation rl_result, int lit, int first_bit,
+                                               int second_bit) = 0;
+    virtual void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) = 0;
+    virtual void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) = 0;
+    virtual void GenPackedSwitch(MIR* mir, uint32_t table_offset,
+                                 RegLocation rl_src) = 0;
+    virtual void GenSparseSwitch(MIR* mir, uint32_t table_offset,
+                                 RegLocation rl_src) = 0;
+    virtual void GenSpecialCase(BasicBlock* bb, MIR* mir,
+                                SpecialCaseHandler special_case) = 0;
+    virtual void GenArrayObjPut(int opt_flags, RegLocation rl_array,
+                                RegLocation rl_index, RegLocation rl_src, int scale) = 0;
+    virtual void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
+                             RegLocation rl_index, RegLocation rl_dest, int scale) = 0;
+    virtual void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
+                     RegLocation rl_index, RegLocation rl_src, int scale) = 0;
+    virtual void GenShiftImmOpLong(Instruction::Code opcode,
+                                   RegLocation rl_dest, RegLocation rl_src1,
+                                   RegLocation rl_shift) = 0;
+
+    // Required for target - single operation generators.
+    virtual LIR* OpUnconditionalBranch(LIR* target) = 0;
+    virtual LIR* OpCmpBranch(ConditionCode cond, int src1, int src2,
+                             LIR* target) = 0;
+    virtual LIR* OpCmpImmBranch(ConditionCode cond, int reg, int check_value,
+                                LIR* target) = 0;
+    virtual LIR* OpCondBranch(ConditionCode cc, LIR* target) = 0;
+    virtual LIR* OpDecAndBranch(ConditionCode c_code, int reg,
+                                LIR* target) = 0;
+    virtual LIR* OpFpRegCopy(int r_dest, int r_src) = 0;
+    virtual LIR* OpIT(ConditionCode cond, const char* guide) = 0;
+    virtual LIR* OpMem(OpKind op, int rBase, int disp) = 0;
+    virtual LIR* OpPcRelLoad(int reg, LIR* target) = 0;
+    virtual LIR* OpReg(OpKind op, int r_dest_src) = 0;
+    virtual LIR* OpRegCopy(int r_dest, int r_src) = 0;
+    virtual LIR* OpRegCopyNoInsert(int r_dest, int r_src) = 0;
+    virtual LIR* OpRegImm(OpKind op, int r_dest_src1, int value) = 0;
+    virtual LIR* OpRegMem(OpKind op, int r_dest, int rBase, int offset) = 0;
+    virtual LIR* OpRegReg(OpKind op, int r_dest_src1, int r_src2) = 0;
+    virtual LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) = 0;
+    virtual LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1,
+                             int r_src2) = 0;
+    virtual LIR* OpTestSuspend(LIR* target) = 0;
+    virtual LIR* OpThreadMem(OpKind op, int thread_offset) = 0;
+    virtual LIR* OpVldm(int rBase, int count) = 0;
+    virtual LIR* OpVstm(int rBase, int count) = 0;
+    virtual void OpLea(int rBase, int reg1, int reg2, int scale,
+                       int offset) = 0;
+    virtual void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo,
+                               int src_hi) = 0;
+    virtual void OpTlsCmp(int offset, int val) = 0;
+    virtual bool InexpensiveConstantInt(int32_t value) = 0;
+    virtual bool InexpensiveConstantFloat(int32_t value) = 0;
+    virtual bool InexpensiveConstantLong(int64_t value) = 0;
+    virtual bool InexpensiveConstantDouble(int64_t value) = 0;
+
+    // Temp workaround
+    void Workaround7250540(RegLocation rl_dest, int value);
+
+    // TODO: add accessors for these.
+    LIR* literal_list_;                        // Constants.
+    LIR* method_literal_list_;                 // Method literals requiring patching.
+    LIR* code_literal_list_;                   // Code literals requiring patching.
+
+  protected:
+    Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph);
+
+    CompilationUnit* GetCompilationUnit() {
+      return cu_;
+    }
+
+    CompilationUnit* const cu_;
+    MIRGraph* const mir_graph_;
+    GrowableList switch_tables_;
+    GrowableList fill_array_data_;
+    GrowableList throw_launchpads_;
+    GrowableList suspend_launchpads_;
+    GrowableList intrinsic_launchpads_;
+    SafeMap<unsigned int, LIR*> boundary_map_; // boundary lookup cache.
+    /*
+     * Holds mapping from native PC to dex PC for safepoints where we may deoptimize.
+     * Native PC is on the return address of the safepointed operation.  Dex PC is for
+     * the instruction being executed at the safepoint.
+     */
+    std::vector<uint32_t> pc2dex_mapping_table_;
+    /*
+     * Holds mapping from Dex PC to native PC for catch entry points.  Native PC and Dex PC
+     * immediately precede the instruction.
+     */
+    std::vector<uint32_t> dex2pc_mapping_table_;
+    int data_offset_;                     // starting offset of literal pool.
+    int total_size_;                      // header + code size.
+    LIR* block_label_list_;
+    PromotionMap* promotion_map_;
+    /*
+     * TODO: The code generation utilities don't have a built-in
+     * mechanism to propagate the original Dalvik opcode address to the
+     * associated generated instructions.  For the trace compiler, this wasn't
+     * necessary because the interpreter handled all throws and debugging
+     * requests.  For now we'll handle this by placing the Dalvik offset
+     * in the CompilationUnit struct before codegen for each instruction.
+     * The low-level LIR creation utilities will pull it from here.  Rework this.
+     */
+    int current_dalvik_offset_;
+    RegisterPool* reg_pool_;
+    /*
+     * Sanity checking for the register temp tracking.  The same ssa
+     * name should never be associated with one temp register per
+     * instruction compilation.
+     */
+    int live_sreg_;
+    CodeBuffer code_buffer_;
+    std::vector<uint32_t> combined_mapping_table_;
+    std::vector<uint32_t> core_vmap_table_;
+    std::vector<uint32_t> fp_vmap_table_;
+    std::vector<uint8_t> native_gc_map_;
+    int num_core_spills_;
+    int num_fp_spills_;
+    int frame_size_;
+    unsigned int core_spill_mask_;
+    unsigned int fp_spill_mask_;
+    LIR* first_lir_insn_;
+    LIR* last_lir_insn_;
+
+};  // Class Mir2Lir
 
 }  // namespace art
 
-#endif // ART_SRC_COMPILER_DEX_QUICK_MIRTOLIR_H_
+#endif // ART_SRC_COMPILER_DEX_QUICK_CODEGEN_H_
diff --git a/src/compiler/dex/quick/ralloc_util.cc b/src/compiler/dex/quick/ralloc_util.cc
index 18c7714..a89cfa8 100644
--- a/src/compiler/dex/quick/ralloc_util.cc
+++ b/src/compiler/dex/quick/ralloc_util.cc
@@ -19,31 +19,24 @@
 #include "compiler/dex/compiler_ir.h"
 #include "compiler/dex/compiler_internals.h"
 #include "compiler/dex/compiler_utility.h"
-//#include "compiler/dex/dataflow.h"
-#include "compiler/dex/quick/codegen_util.h"
-#include "ralloc_util.h"
 
 namespace art {
 
-static const RegLocation bad_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0,
-                                    INVALID_REG, INVALID_REG, INVALID_SREG,
-                                    INVALID_SREG};
-
 /*
  * Free all allocated temps in the temp pools.  Note that this does
  * not affect the "liveness" of a temp register, which will stay
  * live until it is either explicitly killed or reallocated.
  */
-void ResetRegPool(CompilationUnit* cu)
+void Mir2Lir::ResetRegPool()
 {
   int i;
-  for (i=0; i < cu->reg_pool->num_core_regs; i++) {
-    if (cu->reg_pool->core_regs[i].is_temp)
-      cu->reg_pool->core_regs[i].in_use = false;
+  for (i=0; i < reg_pool_->num_core_regs; i++) {
+    if (reg_pool_->core_regs[i].is_temp)
+      reg_pool_->core_regs[i].in_use = false;
   }
-  for (i=0; i < cu->reg_pool->num_fp_regs; i++) {
-    if (cu->reg_pool->FPRegs[i].is_temp)
-      cu->reg_pool->FPRegs[i].in_use = false;
+  for (i=0; i < reg_pool_->num_fp_regs; i++) {
+    if (reg_pool_->FPRegs[i].is_temp)
+      reg_pool_->FPRegs[i].in_use = false;
   }
 }
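
To make the free-versus-live distinction concrete, a hypothetical call sequence
(s_reg stands in for some SSA name; this is not code from this change):

    int t = AllocTemp();    // t is now in_use
    MarkLive(t, s_reg);     // t also caches the value named s_reg
    ResetRegPool();         // in_use -> false, but live stays true: the cached
                            // value remains reusable until Clobber() or
                            // ClobberAllRegs() kills it.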
 
@@ -51,7 +44,7 @@
   * Set up temp & preserved register pools specialized by target.
   * Note: num_regs may be zero.
   */
-void CompilerInitPool(RegisterInfo* regs, int* reg_nums, int num)
+void Mir2Lir::CompilerInitPool(RegisterInfo* regs, int* reg_nums, int num)
 {
   int i;
   for (i=0; i < num; i++) {
@@ -65,7 +58,7 @@
   }
 }
 
-void DumpRegPool(RegisterInfo* p, int num_regs)
+void Mir2Lir::DumpRegPool(RegisterInfo* p, int num_regs)
 {
   LOG(INFO) << "================================================";
   for (int i = 0; i < num_regs; i++) {
@@ -78,18 +71,18 @@
   LOG(INFO) << "================================================";
 }
 
-void DumpCoreRegPool(CompilationUnit* cu)
+void Mir2Lir::DumpCoreRegPool()
 {
-  DumpRegPool(cu->reg_pool->core_regs, cu->reg_pool->num_core_regs);
+  DumpRegPool(reg_pool_->core_regs, reg_pool_->num_core_regs);
 }
 
-void DumpFpRegPool(CompilationUnit* cu)
+void Mir2Lir::DumpFpRegPool()
 {
-  DumpRegPool(cu->reg_pool->FPRegs, cu->reg_pool->num_fp_regs);
+  DumpRegPool(reg_pool_->FPRegs, reg_pool_->num_fp_regs);
 }
 
 /* Mark a temp register as dead.  Does not affect allocation state. */
-static void ClobberBody(CompilationUnit *cu, RegisterInfo* p)
+void Mir2Lir::ClobberBody(RegisterInfo* p)
 {
   if (p->is_temp) {
     DCHECK(!(p->live && p->dirty))  << "Live & dirty temp in clobber";
@@ -99,19 +92,18 @@
     p->def_end = NULL;
     if (p->pair) {
       p->pair = false;
-      Clobber(cu, p->partner);
+      Clobber(p->partner);
     }
   }
 }
 
 /* Mark a temp register as dead.  Does not affect allocation state. */
-void Clobber(CompilationUnit* cu, int reg)
+void Mir2Lir::Clobber(int reg)
 {
-  Codegen* cg = cu->cg.get();
-  ClobberBody(cu, cg->GetRegInfo(cu, reg));
+  ClobberBody(GetRegInfo(reg));
 }
 
-static void ClobberSRegBody(RegisterInfo* p, int num_regs, int s_reg)
+void Mir2Lir::ClobberSRegBody(RegisterInfo* p, int num_regs, int s_reg)
 {
   int i;
   for (i=0; i< num_regs; i++) {
@@ -136,16 +128,16 @@
  * changes (for example: INT_TO_FLOAT v1, v1).  Revisit when improved register allocation is
  * addressed.
  */
-void ClobberSReg(CompilationUnit* cu, int s_reg)
+void Mir2Lir::ClobberSReg(int s_reg)
 {
   /* Reset live temp tracking sanity checker */
   if (kIsDebugBuild) {
-    if (s_reg == cu->live_sreg) {
-      cu->live_sreg = INVALID_SREG;
+    if (s_reg == live_sreg_) {
+      live_sreg_ = INVALID_SREG;
     }
   }
-  ClobberSRegBody(cu->reg_pool->core_regs, cu->reg_pool->num_core_regs, s_reg);
-  ClobberSRegBody(cu->reg_pool->FPRegs, cu->reg_pool->num_fp_regs, s_reg);
+  ClobberSRegBody(reg_pool_->core_regs, reg_pool_->num_core_regs, s_reg);
+  ClobberSRegBody(reg_pool_->FPRegs, reg_pool_->num_fp_regs, s_reg);
 }
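
The hazard this double sweep guards against, spelled out (illustrative):

    // INT_TO_FLOAT v1, v1: the same Dalvik register can be cached both as an
    // int in a core register and as a float in an FP register. Clobbering by
    // s_reg must visit both pools, or the stale copy could be reused after
    // the conversion writes the new value.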
 
 /*
@@ -157,60 +149,57 @@
  * ssa name (above the last original Dalvik register).  This function
  * maps SSA names to positions in the promotion_map array.
  */
-int SRegToPMap(CompilationUnit* cu, int s_reg)
+int Mir2Lir::SRegToPMap(int s_reg)
 {
-  DCHECK_LT(s_reg, cu->mir_graph->GetNumSSARegs());
+  DCHECK_LT(s_reg, mir_graph_->GetNumSSARegs());
   DCHECK_GE(s_reg, 0);
-  int v_reg = cu->mir_graph->SRegToVReg(s_reg);
+  int v_reg = mir_graph_->SRegToVReg(s_reg);
   if (v_reg >= 0) {
-    DCHECK_LT(v_reg, cu->num_dalvik_registers);
+    DCHECK_LT(v_reg, cu_->num_dalvik_registers);
     return v_reg;
   } else {
     int pos = std::abs(v_reg) - std::abs(SSA_METHOD_BASEREG);
-    DCHECK_LE(pos, cu->num_compiler_temps);
-    return cu->num_dalvik_registers + pos;
+    DCHECK_LE(pos, cu_->num_compiler_temps);
+    return cu_->num_dalvik_registers + pos;
   }
 }
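
A worked example of the index math (assuming 16 Dalvik registers and the
SRegToVReg() results shown):

    // Ordinary Dalvik register: v_reg >= 0 maps to itself.
    //   SRegToVReg(s) == 3                  ->  SRegToPMap(s) == 3
    // Compiler temp: v_reg < 0, biased by SSA_METHOD_BASEREG, and placed just
    // past the Dalvik registers in promotion_map_:
    //   num_dalvik_registers == 16, pos 1   ->  SRegToPMap(s) == 17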
 
-void RecordCorePromotion(CompilationUnit* cu, int reg, int s_reg)
+void Mir2Lir::RecordCorePromotion(int reg, int s_reg)
 {
-  Codegen* cg = cu->cg.get();
-  int p_map_idx = SRegToPMap(cu, s_reg);
-  int v_reg = cu->mir_graph->SRegToVReg(s_reg);
-  cg->GetRegInfo(cu, reg)->in_use = true;
-  cu->core_spill_mask |= (1 << reg);
+  int p_map_idx = SRegToPMap(s_reg);
+  int v_reg = mir_graph_->SRegToVReg(s_reg);
+  GetRegInfo(reg)->in_use = true;
+  core_spill_mask_ |= (1 << reg);
   // Include reg for later sort
-  cu->core_vmap_table.push_back(reg << VREG_NUM_WIDTH |
-                                 (v_reg & ((1 << VREG_NUM_WIDTH) - 1)));
-  cu->num_core_spills++;
-  cu->promotion_map[p_map_idx].core_location = kLocPhysReg;
-  cu->promotion_map[p_map_idx].core_reg = reg;
+  core_vmap_table_.push_back(reg << VREG_NUM_WIDTH | (v_reg & ((1 << VREG_NUM_WIDTH) - 1)));
+  num_core_spills_++;
+  promotion_map_[p_map_idx].core_location = kLocPhysReg;
+  promotion_map_[p_map_idx].core_reg = reg;
 }
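
Each core_vmap_table_ entry packs both numbers into one word, with the physical
register in the high bits and the Dalvik v_reg in the low VREG_NUM_WIDTH (16)
bits; for example:

    //   reg == 5, v_reg == 3  ->  (5 << 16) | 3  ==  0x00050003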
 
 /* Reserve a callee-save register.  Return -1 if none available */
-static int AllocPreservedCoreReg(CompilationUnit* cu, int s_reg)
+int Mir2Lir::AllocPreservedCoreReg(int s_reg)
 {
   int res = -1;
-  RegisterInfo* core_regs = cu->reg_pool->core_regs;
-  for (int i = 0; i < cu->reg_pool->num_core_regs; i++) {
+  RegisterInfo* core_regs = reg_pool_->core_regs;
+  for (int i = 0; i < reg_pool_->num_core_regs; i++) {
     if (!core_regs[i].is_temp && !core_regs[i].in_use) {
       res = core_regs[i].reg;
-      RecordCorePromotion(cu, res, s_reg);
+      RecordCorePromotion(res, s_reg);
       break;
     }
   }
   return res;
 }
 
-void RecordFpPromotion(CompilationUnit* cu, int reg, int s_reg)
+void Mir2Lir::RecordFpPromotion(int reg, int s_reg)
 {
-  Codegen* cg = cu->cg.get();
-  int p_map_idx = SRegToPMap(cu, s_reg);
-  int v_reg = cu->mir_graph->SRegToVReg(s_reg);
-  cg->GetRegInfo(cu, reg)->in_use = true;
-  cg->MarkPreservedSingle(cu, v_reg, reg);
-  cu->promotion_map[p_map_idx].fp_location = kLocPhysReg;
-  cu->promotion_map[p_map_idx].FpReg = reg;
+  int p_map_idx = SRegToPMap(s_reg);
+  int v_reg = mir_graph_->SRegToVReg(s_reg);
+  GetRegInfo(reg)->in_use = true;
+  MarkPreservedSingle(v_reg, reg);
+  promotion_map_[p_map_idx].fp_location = kLocPhysReg;
+  promotion_map_[p_map_idx].FpReg = reg;
 }
 
 /*
@@ -218,15 +207,15 @@
  * even/odd allocation, but go ahead and allocate anything if not
  * available.  If nothing's available, return -1.
  */
-static int AllocPreservedSingle(CompilationUnit* cu, int s_reg, bool even)
+int Mir2Lir::AllocPreservedSingle(int s_reg, bool even)
 {
   int res = -1;
-  RegisterInfo* FPRegs = cu->reg_pool->FPRegs;
-  for (int i = 0; i < cu->reg_pool->num_fp_regs; i++) {
+  RegisterInfo* FPRegs = reg_pool_->FPRegs;
+  for (int i = 0; i < reg_pool_->num_fp_regs; i++) {
     if (!FPRegs[i].is_temp && !FPRegs[i].in_use &&
       ((FPRegs[i].reg & 0x1) == 0) == even) {
       res = FPRegs[i].reg;
-      RecordFpPromotion(cu, res, s_reg);
+      RecordFpPromotion(res, s_reg);
       break;
     }
   }
@@ -241,21 +230,20 @@
  * allocate if we can't meet the requirements for the pair of
  * s_reg<=sX[even] & (s_reg+1)<= sX+1.
  */
-static int AllocPreservedDouble(CompilationUnit* cu, int s_reg)
+int Mir2Lir::AllocPreservedDouble(int s_reg)
 {
-  Codegen* cg = cu->cg.get();
   int res = -1; // Assume failure
-  int v_reg = cu->mir_graph->SRegToVReg(s_reg);
-  int p_map_idx = SRegToPMap(cu, s_reg);
-  if (cu->promotion_map[p_map_idx+1].fp_location == kLocPhysReg) {
+  int v_reg = mir_graph_->SRegToVReg(s_reg);
+  int p_map_idx = SRegToPMap(s_reg);
+  if (promotion_map_[p_map_idx+1].fp_location == kLocPhysReg) {
     // Upper reg is already allocated.  Can we fit?
-    int high_reg = cu->promotion_map[p_map_idx+1].FpReg;
+    int high_reg = promotion_map_[p_map_idx+1].FpReg;
     if ((high_reg & 1) == 0) {
       // High reg is even - fail.
       return res;
     }
     // Is the low reg of the pair free?
-    RegisterInfo* p = cg->GetRegInfo(cu, high_reg-1);
+    RegisterInfo* p = GetRegInfo(high_reg-1);
     if (p->in_use || p->is_temp) {
       // Already allocated or not preserved - fail.
       return res;
@@ -264,10 +252,10 @@
     res = p->reg;
     p->in_use = true;
     DCHECK_EQ((res & 1), 0);
-    cg->MarkPreservedSingle(cu, v_reg, res);
+    MarkPreservedSingle(v_reg, res);
   } else {
-    RegisterInfo* FPRegs = cu->reg_pool->FPRegs;
-    for (int i = 0; i < cu->reg_pool->num_fp_regs; i++) {
+    RegisterInfo* FPRegs = reg_pool_->FPRegs;
+    for (int i = 0; i < reg_pool_->num_fp_regs; i++) {
       if (!FPRegs[i].is_temp && !FPRegs[i].in_use &&
         ((FPRegs[i].reg & 0x1) == 0x0) &&
         !FPRegs[i+1].is_temp && !FPRegs[i+1].in_use &&
@@ -275,19 +263,19 @@
         (FPRegs[i].reg + 1) == FPRegs[i+1].reg) {
         res = FPRegs[i].reg;
         FPRegs[i].in_use = true;
-        cg->MarkPreservedSingle(cu, v_reg, res);
+        MarkPreservedSingle(v_reg, res);
         FPRegs[i+1].in_use = true;
         DCHECK_EQ(res + 1, FPRegs[i+1].reg);
-        cg->MarkPreservedSingle(cu, v_reg+1, res+1);
+        MarkPreservedSingle(v_reg+1, res+1);
         break;
       }
     }
   }
   if (res != -1) {
-    cu->promotion_map[p_map_idx].fp_location = kLocPhysReg;
-    cu->promotion_map[p_map_idx].FpReg = res;
-    cu->promotion_map[p_map_idx+1].fp_location = kLocPhysReg;
-    cu->promotion_map[p_map_idx+1].FpReg = res + 1;
+    promotion_map_[p_map_idx].fp_location = kLocPhysReg;
+    promotion_map_[p_map_idx].FpReg = res;
+    promotion_map_[p_map_idx+1].fp_location = kLocPhysReg;
+    promotion_map_[p_map_idx+1].FpReg = res + 1;
   }
   return res;
 }
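
The alignment constraint in concrete terms (illustrative single-precision
register numbers):

    // A double needs an aligned (even, even+1) pair of singles, e.g. s4/s5.
    // If the high half was already promoted to s5 (odd), the low half s4 is
    // probed; had the high half landed in an even register such as s6, no
    // pair can be formed and the caller falls back to single promotion.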
@@ -299,22 +287,22 @@
  * single regs (but if we can't, still attempt to allocate a single, preferring
  * to allocate an odd register first).
  */
-static int AllocPreservedFPReg(CompilationUnit* cu, int s_reg, bool double_start)
+int Mir2Lir::AllocPreservedFPReg(int s_reg, bool double_start)
 {
   int res = -1;
   if (double_start) {
-    res = AllocPreservedDouble(cu, s_reg);
+    res = AllocPreservedDouble(s_reg);
   }
   if (res == -1) {
-    res = AllocPreservedSingle(cu, s_reg, false /* try odd # */);
+    res = AllocPreservedSingle(s_reg, false /* try odd # */);
   }
   if (res == -1)
-    res = AllocPreservedSingle(cu, s_reg, true /* try even # */);
+    res = AllocPreservedSingle(s_reg, true /* try even # */);
   return res;
 }
 
-static int AllocTempBody(CompilationUnit* cu, RegisterInfo* p, int num_regs, int* next_temp,
-                          bool required)
+int Mir2Lir::AllocTempBody(RegisterInfo* p, int num_regs, int* next_temp,
+                           bool required)
 {
   int i;
   int next = *next_temp;
@@ -322,7 +310,7 @@
     if (next >= num_regs)
       next = 0;
     if (p[next].is_temp && !p[next].in_use && !p[next].live) {
-      Clobber(cu, p[next].reg);
+      Clobber(p[next].reg);
       p[next].in_use = true;
       p[next].pair = false;
       *next_temp = next + 1;
@@ -335,7 +323,7 @@
     if (next >= num_regs)
       next = 0;
     if (p[next].is_temp && !p[next].in_use) {
-      Clobber(cu, p[next].reg);
+      Clobber(p[next].reg);
       p[next].in_use = true;
       p[next].pair = false;
       *next_temp = next + 1;
@@ -344,21 +332,21 @@
     next++;
   }
   if (required) {
-    CodegenDump(cu);
-    DumpRegPool(cu->reg_pool->core_regs,
-          cu->reg_pool->num_core_regs);
+    CodegenDump();
+    DumpRegPool(reg_pool_->core_regs,
+          reg_pool_->num_core_regs);
     LOG(FATAL) << "No free temp registers";
   }
   return -1;  // No register available
 }
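
The allocation policy above, in brief (a restatement, not additional code):

    // Pass 1: from *next_temp, take the first temp that is neither in_use nor
    //         live -- no cached value is sacrificed.
    // Pass 2: failing that, take any temp that is not in_use and Clobber()
    //         whatever it cached.
    // *next_temp advances past the chosen register, so successive requests
    // rotate through the pool instead of thrashing one register.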
 
 //REDO: too many assumptions.
-int AllocTempDouble(CompilationUnit* cu)
+int Mir2Lir::AllocTempDouble()
 {
-  RegisterInfo* p = cu->reg_pool->FPRegs;
-  int num_regs = cu->reg_pool->num_fp_regs;
+  RegisterInfo* p = reg_pool_->FPRegs;
+  int num_regs = reg_pool_->num_fp_regs;
   /* Start looking at an even reg */
-  int next = cu->reg_pool->next_fp_reg & ~0x1;
+  int next = reg_pool_->next_fp_reg & ~0x1;
 
   // First try to avoid allocating live registers
   for (int i=0; i < num_regs; i+=2) {
@@ -366,21 +354,21 @@
       next = 0;
     if ((p[next].is_temp && !p[next].in_use && !p[next].live) &&
       (p[next+1].is_temp && !p[next+1].in_use && !p[next+1].live)) {
-      Clobber(cu, p[next].reg);
-      Clobber(cu, p[next+1].reg);
+      Clobber(p[next].reg);
+      Clobber(p[next+1].reg);
       p[next].in_use = true;
       p[next+1].in_use = true;
       DCHECK_EQ((p[next].reg+1), p[next+1].reg);
       DCHECK_EQ((p[next].reg & 0x1), 0);
-      cu->reg_pool->next_fp_reg = next + 2;
-      if (cu->reg_pool->next_fp_reg >= num_regs) {
-        cu->reg_pool->next_fp_reg = 0;
+      reg_pool_->next_fp_reg = next + 2;
+      if (reg_pool_->next_fp_reg >= num_regs) {
+        reg_pool_->next_fp_reg = 0;
       }
       return p[next].reg;
     }
     next += 2;
   }
-  next = cu->reg_pool->next_fp_reg & ~0x1;
+  next = reg_pool_->next_fp_reg & ~0x1;
 
   // No choice - find a pair and kill it.
   for (int i=0; i < num_regs; i+=2) {
@@ -388,15 +376,15 @@
       next = 0;
     if (p[next].is_temp && !p[next].in_use && p[next+1].is_temp &&
       !p[next+1].in_use) {
-      Clobber(cu, p[next].reg);
-      Clobber(cu, p[next+1].reg);
+      Clobber(p[next].reg);
+      Clobber(p[next+1].reg);
       p[next].in_use = true;
       p[next+1].in_use = true;
       DCHECK_EQ((p[next].reg+1), p[next+1].reg);
       DCHECK_EQ((p[next].reg & 0x1), 0);
-      cu->reg_pool->next_fp_reg = next + 2;
-      if (cu->reg_pool->next_fp_reg >= num_regs) {
-        cu->reg_pool->next_fp_reg = 0;
+      reg_pool_->next_fp_reg = next + 2;
+      if (reg_pool_->next_fp_reg >= num_regs) {
+        reg_pool_->next_fp_reg = 0;
       }
       return p[next].reg;
     }
@@ -407,28 +395,28 @@
 }
 
 /* Return a temp if one is available, -1 otherwise */
-int AllocFreeTemp(CompilationUnit* cu)
+int Mir2Lir::AllocFreeTemp()
 {
-  return AllocTempBody(cu, cu->reg_pool->core_regs,
-             cu->reg_pool->num_core_regs,
-             &cu->reg_pool->next_core_reg, true);
+  return AllocTempBody(reg_pool_->core_regs,
+             reg_pool_->num_core_regs,
+             &reg_pool_->next_core_reg, false);  // Not required: may return -1.
 }
 
-int AllocTemp(CompilationUnit* cu)
+int Mir2Lir::AllocTemp()
 {
-  return AllocTempBody(cu, cu->reg_pool->core_regs,
-             cu->reg_pool->num_core_regs,
-             &cu->reg_pool->next_core_reg, true);
+  return AllocTempBody(reg_pool_->core_regs,
+             reg_pool_->num_core_regs,
+             &reg_pool_->next_core_reg, true);
 }
 
-int AllocTempFloat(CompilationUnit* cu)
+int Mir2Lir::AllocTempFloat()
 {
-  return AllocTempBody(cu, cu->reg_pool->FPRegs,
-             cu->reg_pool->num_fp_regs,
-             &cu->reg_pool->next_fp_reg, true);
+  return AllocTempBody(reg_pool_->FPRegs,
+             reg_pool_->num_fp_regs,
+             &reg_pool_->next_fp_reg, true);
 }
 
-static RegisterInfo* AllocLiveBody(RegisterInfo* p, int num_regs, int s_reg)
+Mir2Lir::RegisterInfo* Mir2Lir::AllocLiveBody(RegisterInfo* p, int num_regs, int s_reg)
 {
   int i;
   if (s_reg == -1)
@@ -443,23 +431,23 @@
   return NULL;
 }
 
-RegisterInfo* AllocLive(CompilationUnit* cu, int s_reg, int reg_class)
+Mir2Lir::RegisterInfo* Mir2Lir::AllocLive(int s_reg, int reg_class)
 {
   RegisterInfo* res = NULL;
   switch (reg_class) {
     case kAnyReg:
-      res = AllocLiveBody(cu->reg_pool->FPRegs,
-                cu->reg_pool->num_fp_regs, s_reg);
+      res = AllocLiveBody(reg_pool_->FPRegs,
+                reg_pool_->num_fp_regs, s_reg);
       if (res)
         break;
       /* Intentional fallthrough */
     case kCoreReg:
-      res = AllocLiveBody(cu->reg_pool->core_regs,
-                cu->reg_pool->num_core_regs, s_reg);
+      res = AllocLiveBody(reg_pool_->core_regs,
+                reg_pool_->num_core_regs, s_reg);
       break;
     case kFPReg:
-      res = AllocLiveBody(cu->reg_pool->FPRegs,
-                cu->reg_pool->num_fp_regs, s_reg);
+      res = AllocLiveBody(reg_pool_->FPRegs,
+                reg_pool_->num_fp_regs, s_reg);
       break;
     default:
       LOG(FATAL) << "Invalid register type";
@@ -467,10 +455,10 @@
   return res;
 }
 
-void FreeTemp(CompilationUnit* cu, int reg)
+void Mir2Lir::FreeTemp(int reg)
 {
-  RegisterInfo* p = cu->reg_pool->core_regs;
-  int num_regs = cu->reg_pool->num_core_regs;
+  RegisterInfo* p = reg_pool_->core_regs;
+  int num_regs = reg_pool_->num_core_regs;
   int i;
   for (i=0; i< num_regs; i++) {
     if (p[i].reg == reg) {
@@ -481,8 +469,8 @@
       return;
     }
   }
-  p = cu->reg_pool->FPRegs;
-  num_regs = cu->reg_pool->num_fp_regs;
+  p = reg_pool_->FPRegs;
+  num_regs = reg_pool_->num_fp_regs;
   for (i=0; i< num_regs; i++) {
     if (p[i].reg == reg) {
       if (p[i].is_temp) {
@@ -495,18 +483,18 @@
   LOG(FATAL) << "Tried to free a non-existant temp: r" << reg;
 }
 
-RegisterInfo* IsLive(CompilationUnit* cu, int reg)
+Mir2Lir::RegisterInfo* Mir2Lir::IsLive(int reg)
 {
-  RegisterInfo* p = cu->reg_pool->core_regs;
-  int num_regs = cu->reg_pool->num_core_regs;
+  RegisterInfo* p = reg_pool_->core_regs;
+  int num_regs = reg_pool_->num_core_regs;
   int i;
   for (i=0; i< num_regs; i++) {
     if (p[i].reg == reg) {
       return p[i].live ? &p[i] : NULL;
     }
   }
-  p = cu->reg_pool->FPRegs;
-  num_regs = cu->reg_pool->num_fp_regs;
+  p = reg_pool_->FPRegs;
+  num_regs = reg_pool_->num_fp_regs;
   for (i=0; i< num_regs; i++) {
     if (p[i].reg == reg) {
       return p[i].live ? &p[i] : NULL;
@@ -515,24 +503,21 @@
   return NULL;
 }
 
-RegisterInfo* IsTemp(CompilationUnit* cu, int reg)
+Mir2Lir::RegisterInfo* Mir2Lir::IsTemp(int reg)
 {
-  Codegen* cg = cu->cg.get();
-  RegisterInfo* p = cg->GetRegInfo(cu, reg);
+  RegisterInfo* p = GetRegInfo(reg);
   return (p->is_temp) ? p : NULL;
 }
 
-RegisterInfo* IsPromoted(CompilationUnit* cu, int reg)
+Mir2Lir::RegisterInfo* Mir2Lir::IsPromoted(int reg)
 {
-  Codegen* cg = cu->cg.get();
-  RegisterInfo* p = cg->GetRegInfo(cu, reg);
+  RegisterInfo* p = GetRegInfo(reg);
   return (p->is_temp) ? NULL : p;
 }
 
-bool IsDirty(CompilationUnit* cu, int reg)
+bool Mir2Lir::IsDirty(int reg)
 {
-  Codegen* cg = cu->cg.get();
-  RegisterInfo* p = cg->GetRegInfo(cu, reg);
+  RegisterInfo* p = GetRegInfo(reg);
   return p->dirty;
 }
 
@@ -541,10 +526,10 @@
  * register.  No check is made to see if the register was previously
  * allocated.  Use with caution.
  */
-void LockTemp(CompilationUnit* cu, int reg)
+void Mir2Lir::LockTemp(int reg)
 {
-  RegisterInfo* p = cu->reg_pool->core_regs;
-  int num_regs = cu->reg_pool->num_core_regs;
+  RegisterInfo* p = reg_pool_->core_regs;
+  int num_regs = reg_pool_->num_core_regs;
   int i;
   for (i=0; i< num_regs; i++) {
     if (p[i].reg == reg) {
@@ -554,8 +539,8 @@
       return;
     }
   }
-  p = cu->reg_pool->FPRegs;
-  num_regs = cu->reg_pool->num_fp_regs;
+  p = reg_pool_->FPRegs;
+  num_regs = reg_pool_->num_fp_regs;
   for (i=0; i< num_regs; i++) {
     if (p[i].reg == reg) {
       DCHECK(p[i].is_temp);
@@ -567,19 +552,18 @@
   LOG(FATAL) << "Tried to lock a non-existant temp: r" << reg;
 }
 
-static void ResetDefBody(RegisterInfo* p)
+void Mir2Lir::ResetDefBody(RegisterInfo* p)
 {
   p->def_start = NULL;
   p->def_end = NULL;
 }
 
-void ResetDef(CompilationUnit* cu, int reg)
+void Mir2Lir::ResetDef(int reg)
 {
-  Codegen* cg = cu->cg.get();
-  ResetDefBody(cg->GetRegInfo(cu, reg));
+  ResetDefBody(GetRegInfo(reg));
 }
 
-static void NullifyRange(CompilationUnit* cu, LIR *start, LIR *finish, int s_reg1, int s_reg2)
+void Mir2Lir::NullifyRange(LIR *start, LIR *finish, int s_reg1, int s_reg2)
 {
   if (start && finish) {
     LIR *p;
@@ -597,14 +581,12 @@
  * on entry start points to the LIR prior to the beginning of the
  * sequence.
  */
-void MarkDef(CompilationUnit* cu, RegLocation rl,
-             LIR *start, LIR *finish)
+void Mir2Lir::MarkDef(RegLocation rl, LIR *start, LIR *finish)
 {
   DCHECK(!rl.wide);
   DCHECK(start && start->next);
   DCHECK(finish);
-  Codegen* cg = cu->cg.get();
-  RegisterInfo* p = cg->GetRegInfo(cu, rl.low_reg);
+  RegisterInfo* p = GetRegInfo(rl.low_reg);
   p->def_start = start->next;
   p->def_end = finish;
 }
@@ -614,26 +596,23 @@
  * on entry start points to the LIR prior to the beginning of the
  * sequence.
  */
-void MarkDefWide(CompilationUnit* cu, RegLocation rl,
-               LIR *start, LIR *finish)
+void Mir2Lir::MarkDefWide(RegLocation rl, LIR *start, LIR *finish)
 {
   DCHECK(rl.wide);
   DCHECK(start && start->next);
   DCHECK(finish);
-  Codegen* cg = cu->cg.get();
-  RegisterInfo* p = cg->GetRegInfo(cu, rl.low_reg);
-  ResetDef(cu, rl.high_reg);  // Only track low of pair
+  RegisterInfo* p = GetRegInfo(rl.low_reg);
+  ResetDef(rl.high_reg);  // Only track low of pair
   p->def_start = start->next;
   p->def_end = finish;
 }
 
-RegLocation WideToNarrow(CompilationUnit* cu, RegLocation rl)
+RegLocation Mir2Lir::WideToNarrow(RegLocation rl)
 {
   DCHECK(rl.wide);
-  Codegen* cg = cu->cg.get();
   if (rl.location == kLocPhysReg) {
-    RegisterInfo* info_lo = cg->GetRegInfo(cu, rl.low_reg);
-    RegisterInfo* info_hi = cg->GetRegInfo(cu, rl.high_reg);
+    RegisterInfo* info_lo = GetRegInfo(rl.low_reg);
+    RegisterInfo* info_hi = GetRegInfo(rl.high_reg);
     if (info_lo->is_temp) {
       info_lo->pair = false;
       info_lo->def_start = NULL;
@@ -649,102 +628,99 @@
   return rl;
 }
 
-void ResetDefLoc(CompilationUnit* cu, RegLocation rl)
+void Mir2Lir::ResetDefLoc(RegLocation rl)
 {
   DCHECK(!rl.wide);
-  RegisterInfo* p = IsTemp(cu, rl.low_reg);
-  if (p && !(cu->disable_opt & (1 << kSuppressLoads))) {
+  RegisterInfo* p = IsTemp(rl.low_reg);
+  if (p && !(cu_->disable_opt & (1 << kSuppressLoads))) {
     DCHECK(!p->pair);
-    NullifyRange(cu, p->def_start, p->def_end, p->s_reg, rl.s_reg_low);
+    NullifyRange(p->def_start, p->def_end, p->s_reg, rl.s_reg_low);
   }
-  ResetDef(cu, rl.low_reg);
+  ResetDef(rl.low_reg);
 }
 
-void ResetDefLocWide(CompilationUnit* cu, RegLocation rl)
+void Mir2Lir::ResetDefLocWide(RegLocation rl)
 {
   DCHECK(rl.wide);
-  RegisterInfo* p_low = IsTemp(cu, rl.low_reg);
-  RegisterInfo* p_high = IsTemp(cu, rl.high_reg);
-  if (p_low && !(cu->disable_opt & (1 << kSuppressLoads))) {
+  RegisterInfo* p_low = IsTemp(rl.low_reg);
+  RegisterInfo* p_high = IsTemp(rl.high_reg);
+  if (p_low && !(cu_->disable_opt & (1 << kSuppressLoads))) {
     DCHECK(p_low->pair);
-    NullifyRange(cu, p_low->def_start, p_low->def_end, p_low->s_reg, rl.s_reg_low);
+    NullifyRange(p_low->def_start, p_low->def_end, p_low->s_reg, rl.s_reg_low);
   }
-  if (p_high && !(cu->disable_opt & (1 << kSuppressLoads))) {
+  if (p_high && !(cu_->disable_opt & (1 << kSuppressLoads))) {
     DCHECK(p_high->pair);
   }
-  ResetDef(cu, rl.low_reg);
-  ResetDef(cu, rl.high_reg);
+  ResetDef(rl.low_reg);
+  ResetDef(rl.high_reg);
 }
 
-void ResetDefTracking(CompilationUnit* cu)
+void Mir2Lir::ResetDefTracking()
 {
   int i;
-  for (i=0; i< cu->reg_pool->num_core_regs; i++) {
-    ResetDefBody(&cu->reg_pool->core_regs[i]);
+  for (i=0; i< reg_pool_->num_core_regs; i++) {
+    ResetDefBody(&reg_pool_->core_regs[i]);
   }
-  for (i=0; i< cu->reg_pool->num_fp_regs; i++) {
-    ResetDefBody(&cu->reg_pool->FPRegs[i]);
+  for (i=0; i< reg_pool_->num_fp_regs; i++) {
+    ResetDefBody(&reg_pool_->FPRegs[i]);
   }
 }
 
-void ClobberAllRegs(CompilationUnit* cu)
+void Mir2Lir::ClobberAllRegs()
 {
   int i;
-  for (i=0; i< cu->reg_pool->num_core_regs; i++) {
-    ClobberBody(cu, &cu->reg_pool->core_regs[i]);
+  for (i=0; i< reg_pool_->num_core_regs; i++) {
+    ClobberBody(&reg_pool_->core_regs[i]);
   }
-  for (i=0; i< cu->reg_pool->num_fp_regs; i++) {
-    ClobberBody(cu, &cu->reg_pool->FPRegs[i]);
+  for (i=0; i< reg_pool_->num_fp_regs; i++) {
+    ClobberBody(&reg_pool_->FPRegs[i]);
   }
 }
 
 // Make sure nothing is live and dirty
-static void FlushAllRegsBody(CompilationUnit* cu, RegisterInfo* info, int num_regs)
+void Mir2Lir::FlushAllRegsBody(RegisterInfo* info, int num_regs)
 {
-  Codegen* cg = cu->cg.get();
   int i;
   for (i=0; i < num_regs; i++) {
     if (info[i].live && info[i].dirty) {
       if (info[i].pair) {
-        cg->FlushRegWide(cu, info[i].reg, info[i].partner);
+        FlushRegWide(info[i].reg, info[i].partner);
       } else {
-        cg->FlushReg(cu, info[i].reg);
+        FlushReg(info[i].reg);
       }
     }
   }
 }
 
-void FlushAllRegs(CompilationUnit* cu)
+void Mir2Lir::FlushAllRegs()
 {
-  FlushAllRegsBody(cu, cu->reg_pool->core_regs,
-           cu->reg_pool->num_core_regs);
-  FlushAllRegsBody(cu, cu->reg_pool->FPRegs,
-           cu->reg_pool->num_fp_regs);
-  ClobberAllRegs(cu);
+  FlushAllRegsBody(reg_pool_->core_regs,
+           reg_pool_->num_core_regs);
+  FlushAllRegsBody(reg_pool_->FPRegs,
+           reg_pool_->num_fp_regs);
+  ClobberAllRegs();
 }
 
 
 //TUNING: rewrite all of this reg stuff.  Probably use an attribute table
-static bool RegClassMatches(CompilationUnit* cu, int reg_class, int reg)
+bool Mir2Lir::RegClassMatches(int reg_class, int reg)
 {
-  Codegen* cg = cu->cg.get();
   if (reg_class == kAnyReg) {
     return true;
   } else if (reg_class == kCoreReg) {
-    return !cg->IsFpReg(reg);
+    return !IsFpReg(reg);
   } else {
-    return cg->IsFpReg(reg);
+    return IsFpReg(reg);
   }
 }
 
-void MarkLive(CompilationUnit* cu, int reg, int s_reg)
+void Mir2Lir::MarkLive(int reg, int s_reg)
 {
-  Codegen* cg = cu->cg.get();
-  RegisterInfo* info = cg->GetRegInfo(cu, reg);
+  RegisterInfo* info = GetRegInfo(reg);
   if ((info->reg == reg) && (info->s_reg == s_reg) && info->live) {
     return;  /* already live */
   } else if (s_reg != INVALID_SREG) {
-    ClobberSReg(cu, s_reg);
+    ClobberSReg(s_reg);
     if (info->is_temp) {
       info->live = true;
     }
@@ -756,68 +732,61 @@
   info->s_reg = s_reg;
 }
 
-void MarkTemp(CompilationUnit* cu, int reg)
+void Mir2Lir::MarkTemp(int reg)
 {
-  Codegen* cg = cu->cg.get();
-  RegisterInfo* info = cg->GetRegInfo(cu, reg);
+  RegisterInfo* info = GetRegInfo(reg);
   info->is_temp = true;
 }
 
-void UnmarkTemp(CompilationUnit* cu, int reg)
+void Mir2Lir::UnmarkTemp(int reg)
 {
-  Codegen* cg = cu->cg.get();
-  RegisterInfo* info = cg->GetRegInfo(cu, reg);
+  RegisterInfo* info = GetRegInfo(reg);
   info->is_temp = false;
 }
 
-void MarkPair(CompilationUnit* cu, int low_reg, int high_reg)
+void Mir2Lir::MarkPair(int low_reg, int high_reg)
 {
-  Codegen* cg = cu->cg.get();
-  RegisterInfo* info_lo = cg->GetRegInfo(cu, low_reg);
-  RegisterInfo* info_hi = cg->GetRegInfo(cu, high_reg);
+  RegisterInfo* info_lo = GetRegInfo(low_reg);
+  RegisterInfo* info_hi = GetRegInfo(high_reg);
   info_lo->pair = info_hi->pair = true;
   info_lo->partner = high_reg;
   info_hi->partner = low_reg;
 }
 
-void MarkClean(CompilationUnit* cu, RegLocation loc)
+void Mir2Lir::MarkClean(RegLocation loc)
 {
-  Codegen* cg = cu->cg.get();
-  RegisterInfo* info = cg->GetRegInfo(cu, loc.low_reg);
+  RegisterInfo* info = GetRegInfo(loc.low_reg);
   info->dirty = false;
   if (loc.wide) {
-    info = cg->GetRegInfo(cu, loc.high_reg);
+    info = GetRegInfo(loc.high_reg);
     info->dirty = false;
   }
 }
 
-void MarkDirty(CompilationUnit* cu, RegLocation loc)
+void Mir2Lir::MarkDirty(RegLocation loc)
 {
   if (loc.home) {
     // If already home, can't be dirty
     return;
   }
-  Codegen* cg = cu->cg.get();
-  RegisterInfo* info = cg->GetRegInfo(cu, loc.low_reg);
+  RegisterInfo* info = GetRegInfo(loc.low_reg);
   info->dirty = true;
   if (loc.wide) {
-    info = cg->GetRegInfo(cu, loc.high_reg);
+    info = GetRegInfo(loc.high_reg);
     info->dirty = true;
   }
 }
 
-void MarkInUse(CompilationUnit* cu, int reg)
+void Mir2Lir::MarkInUse(int reg)
 {
-  Codegen* cg = cu->cg.get();
-    RegisterInfo* info = cg->GetRegInfo(cu, reg);
+    RegisterInfo* info = GetRegInfo(reg);
     info->in_use = true;
 }
 
-static void CopyRegInfo(CompilationUnit* cu, int new_reg, int old_reg)
+void Mir2Lir::CopyRegInfo(int new_reg, int old_reg)
 {
-  Codegen* cg = cu->cg.get();
-  RegisterInfo* new_info = cg->GetRegInfo(cu, new_reg);
-  RegisterInfo* old_info = cg->GetRegInfo(cu, old_reg);
+  RegisterInfo* new_info = GetRegInfo(new_reg);
+  RegisterInfo* old_info = GetRegInfo(old_reg);
   // Target temp status must not change
   bool is_temp = new_info->is_temp;
   *new_info = *old_info;
@@ -826,15 +795,14 @@
   new_info->reg = new_reg;
 }
 
-static bool CheckCorePoolSanity(CompilationUnit* cu)
+bool Mir2Lir::CheckCorePoolSanity()
 {
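+  // Verify pair bookkeeping: every paired core reg must name a partner that
+  // points back at it, and dead regs must carry no pending def ranges.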
-  Codegen* cg = cu->cg.get();
-   for (static int i = 0; i < cu->reg_pool->num_core_regs; i++) {
-     if (cu->reg_pool->core_regs[i].pair) {
-       static int my_reg = cu->reg_pool->core_regs[i].reg;
-       static int my_sreg = cu->reg_pool->core_regs[i].s_reg;
-       static int partner_reg = cu->reg_pool->core_regs[i].partner;
-       static RegisterInfo* partner = cg->GetRegInfo(cu, partner_reg);
+   for (int i = 0; i < reg_pool_->num_core_regs; i++) {
+     if (reg_pool_->core_regs[i].pair) {
+       int my_reg = reg_pool_->core_regs[i].reg;
+       int my_sreg = reg_pool_->core_regs[i].s_reg;
+       int partner_reg = reg_pool_->core_regs[i].partner;
+       RegisterInfo* partner = GetRegInfo(partner_reg);
        DCHECK(partner != NULL);
        DCHECK(partner->pair);
        DCHECK_EQ(my_reg, partner->partner);
@@ -846,9 +814,9 @@
          DCHECK((diff == -1) || (diff == 1));
        }
      }
-     if (!cu->reg_pool->core_regs[i].live) {
-       DCHECK(cu->reg_pool->core_regs[i].def_start == NULL);
-       DCHECK(cu->reg_pool->core_regs[i].def_end == NULL);
+     if (!reg_pool_->core_regs[i].live) {
+       DCHECK(reg_pool_->core_regs[i].def_start == NULL);
+       DCHECK(reg_pool_->core_regs[i].def_end == NULL);
      }
    }
    return true;
@@ -864,19 +832,19 @@
  * if it's worthwhile trying to be more clever here.
  */
 
-RegLocation UpdateLoc(CompilationUnit* cu, RegLocation loc)
+RegLocation Mir2Lir::UpdateLoc(RegLocation loc)
 {
   DCHECK(!loc.wide);
-  DCHECK(CheckCorePoolSanity(cu));
+  DCHECK(CheckCorePoolSanity());
   if (loc.location != kLocPhysReg) {
     DCHECK((loc.location == kLocDalvikFrame) ||
          (loc.location == kLocCompilerTemp));
-    RegisterInfo* info_lo = AllocLive(cu, loc.s_reg_low, kAnyReg);
+    RegisterInfo* info_lo = AllocLive(loc.s_reg_low, kAnyReg);
     if (info_lo) {
       if (info_lo->pair) {
-        Clobber(cu, info_lo->reg);
-        Clobber(cu, info_lo->partner);
-        FreeTemp(cu, info_lo->reg);
+        Clobber(info_lo->reg);
+        Clobber(info_lo->partner);
+        FreeTemp(info_lo->reg);
       } else {
         loc.low_reg = info_lo->reg;
         loc.location = kLocPhysReg;
@@ -888,25 +856,23 @@
 }
 
  /* see comments for UpdateLoc */
-RegLocation UpdateLocWide(CompilationUnit* cu, RegLocation loc)
+RegLocation Mir2Lir::UpdateLocWide(RegLocation loc)
 {
   DCHECK(loc.wide);
-  DCHECK(CheckCorePoolSanity(cu));
-  Codegen* cg = cu->cg.get();
+  DCHECK(CheckCorePoolSanity());
   if (loc.location != kLocPhysReg) {
     DCHECK((loc.location == kLocDalvikFrame) ||
          (loc.location == kLocCompilerTemp));
     // Are the dalvik regs already live in physical registers?
-    RegisterInfo* info_lo = AllocLive(cu, loc.s_reg_low, kAnyReg);
-    RegisterInfo* info_hi = AllocLive(cu,
-        GetSRegHi(loc.s_reg_low), kAnyReg);
+    RegisterInfo* info_lo = AllocLive(loc.s_reg_low, kAnyReg);
+    RegisterInfo* info_hi = AllocLive(GetSRegHi(loc.s_reg_low), kAnyReg);
     bool match = true;
     match = match && (info_lo != NULL);
     match = match && (info_hi != NULL);
     // Are they both core or both FP?
-    match = match && (cg->IsFpReg(info_lo->reg) == cg->IsFpReg(info_hi->reg));
+    match = match && (IsFpReg(info_lo->reg) == IsFpReg(info_hi->reg));
     // If a pair of floating point singles, are they properly aligned?
-    if (match && cg->IsFpReg(info_lo->reg)) {
+    if (match && IsFpReg(info_lo->reg)) {
       match &= ((info_lo->reg & 0x1) == 0);
       match &= ((info_hi->reg - info_lo->reg) == 1);
     }
@@ -921,22 +887,22 @@
       loc.low_reg = info_lo->reg;
       loc.high_reg = info_hi->reg;
       loc.location = kLocPhysReg;
-      MarkPair(cu, loc.low_reg, loc.high_reg);
-      DCHECK(!cg->IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
+      MarkPair(loc.low_reg, loc.high_reg);
+      DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
       return loc;
     }
     // Can't easily reuse - clobber and free any overlaps
     if (info_lo) {
-      Clobber(cu, info_lo->reg);
-      FreeTemp(cu, info_lo->reg);
+      Clobber(info_lo->reg);
+      FreeTemp(info_lo->reg);
       if (info_lo->pair)
-        Clobber(cu, info_lo->partner);
+        Clobber(info_lo->partner);
     }
     if (info_hi) {
-      Clobber(cu, info_hi->reg);
-      FreeTemp(cu, info_hi->reg);
+      Clobber(info_hi->reg);
+      FreeTemp(info_hi->reg);
       if (info_hi->pair)
-        Clobber(cu, info_hi->partner);
+        Clobber(info_hi->partner);
     }
   }
   return loc;
@@ -944,42 +910,41 @@
 
 
 /* For use in cases we don't know (or care) width */
-RegLocation UpdateRawLoc(CompilationUnit* cu, RegLocation loc)
+RegLocation Mir2Lir::UpdateRawLoc(RegLocation loc)
 {
   if (loc.wide)
-    return UpdateLocWide(cu, loc);
+    return UpdateLocWide(loc);
   else
-    return UpdateLoc(cu, loc);
+    return UpdateLoc(loc);
 }
 
-RegLocation EvalLocWide(CompilationUnit* cu, RegLocation loc, int reg_class, bool update)
+RegLocation Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update)
 {
   DCHECK(loc.wide);
   int new_regs;
   int low_reg;
   int high_reg;
-  Codegen* cg = cu->cg.get();
 
-  loc = UpdateLocWide(cu, loc);
+  loc = UpdateLocWide(loc);
 
   /* If already in registers, we can assume proper form.  Right reg class? */
   if (loc.location == kLocPhysReg) {
-    DCHECK_EQ(cg->IsFpReg(loc.low_reg), cg->IsFpReg(loc.high_reg));
-    DCHECK(!cg->IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
-    if (!RegClassMatches(cu, reg_class, loc.low_reg)) {
+    DCHECK_EQ(IsFpReg(loc.low_reg), IsFpReg(loc.high_reg));
+    DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
+    if (!RegClassMatches(reg_class, loc.low_reg)) {
       /* Wrong register class.  Reallocate and copy */
-      new_regs = cg->AllocTypedTempPair(cu, loc.fp, reg_class);
+      new_regs = AllocTypedTempPair(loc.fp, reg_class);
       low_reg = new_regs & 0xff;
       high_reg = (new_regs >> 8) & 0xff;
-      cg->OpRegCopyWide(cu, low_reg, high_reg, loc.low_reg, loc.high_reg);
-      CopyRegInfo(cu, low_reg, loc.low_reg);
-      CopyRegInfo(cu, high_reg, loc.high_reg);
-      Clobber(cu, loc.low_reg);
-      Clobber(cu, loc.high_reg);
+      OpRegCopyWide(low_reg, high_reg, loc.low_reg, loc.high_reg);
+      CopyRegInfo(low_reg, loc.low_reg);
+      CopyRegInfo(high_reg, loc.high_reg);
+      Clobber(loc.low_reg);
+      Clobber(loc.high_reg);
       loc.low_reg = low_reg;
       loc.high_reg = high_reg;
-      MarkPair(cu, loc.low_reg, loc.high_reg);
-      DCHECK(!cg->IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
+      MarkPair(loc.low_reg, loc.high_reg);
+      DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
     }
     return loc;
   }
@@ -987,38 +952,36 @@
   DCHECK_NE(loc.s_reg_low, INVALID_SREG);
   DCHECK_NE(GetSRegHi(loc.s_reg_low), INVALID_SREG);
 
-  new_regs = cg->AllocTypedTempPair(cu, loc.fp, reg_class);
+  new_regs = AllocTypedTempPair(loc.fp, reg_class);
   loc.low_reg = new_regs & 0xff;
   loc.high_reg = (new_regs >> 8) & 0xff;
 
-  MarkPair(cu, loc.low_reg, loc.high_reg);
+  MarkPair(loc.low_reg, loc.high_reg);
   if (update) {
     loc.location = kLocPhysReg;
-    MarkLive(cu, loc.low_reg, loc.s_reg_low);
-    MarkLive(cu, loc.high_reg, GetSRegHi(loc.s_reg_low));
+    MarkLive(loc.low_reg, loc.s_reg_low);
+    MarkLive(loc.high_reg, GetSRegHi(loc.s_reg_low));
   }
-  DCHECK(!cg->IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
+  DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
   return loc;
 }
 
-RegLocation EvalLoc(CompilationUnit* cu, RegLocation loc,
-                int reg_class, bool update)
+RegLocation Mir2Lir::EvalLoc(RegLocation loc, int reg_class, bool update)
 {
   int new_reg;
 
   if (loc.wide)
-    return EvalLocWide(cu, loc, reg_class, update);
+    return EvalLocWide(loc, reg_class, update);
 
-  Codegen* cg = cu->cg.get();
-  loc = UpdateLoc(cu, loc);
+  loc = UpdateLoc(loc);
 
   if (loc.location == kLocPhysReg) {
-    if (!RegClassMatches(cu, reg_class, loc.low_reg)) {
+    if (!RegClassMatches(reg_class, loc.low_reg)) {
       /* Wrong register class.  Realloc, copy and transfer ownership */
-      new_reg = cg->AllocTypedTemp(cu, loc.fp, reg_class);
-      cg->OpRegCopy(cu, new_reg, loc.low_reg);
-      CopyRegInfo(cu, new_reg, loc.low_reg);
-      Clobber(cu, loc.low_reg);
+      new_reg = AllocTypedTemp(loc.fp, reg_class);
+      OpRegCopy(new_reg, loc.low_reg);
+      CopyRegInfo(new_reg, loc.low_reg);
+      Clobber(loc.low_reg);
       loc.low_reg = new_reg;
     }
     return loc;
@@ -1026,76 +989,33 @@
 
   DCHECK_NE(loc.s_reg_low, INVALID_SREG);
 
-  new_reg = cg->AllocTypedTemp(cu, loc.fp, reg_class);
+  new_reg = AllocTypedTemp(loc.fp, reg_class);
   loc.low_reg = new_reg;
 
   if (update) {
     loc.location = kLocPhysReg;
-    MarkLive(cu, loc.low_reg, loc.s_reg_low);
+    MarkLive(loc.low_reg, loc.s_reg_low);
   }
   return loc;
 }
 
-RegLocation GetRawSrc(CompilationUnit* cu, MIR* mir, int num)
-{
-  DCHECK(num < mir->ssa_rep->num_uses);
-  RegLocation res = cu->reg_location[mir->ssa_rep->uses[num]];
-  return res;
-}
-
-RegLocation GetRawDest(CompilationUnit* cu, MIR* mir)
-{
-  DCHECK_GT(mir->ssa_rep->num_defs, 0);
-  RegLocation res = cu->reg_location[mir->ssa_rep->defs[0]];
-  return res;
-}
-
-RegLocation GetDest(CompilationUnit* cu, MIR* mir)
-{
-  RegLocation res = GetRawDest(cu, mir);
-  DCHECK(!res.wide);
-  return res;
-}
-
-RegLocation GetSrc(CompilationUnit* cu, MIR* mir, int num)
-{
-  RegLocation res = GetRawSrc(cu, mir, num);
-  DCHECK(!res.wide);
-  return res;
-}
-
-RegLocation GetDestWide(CompilationUnit* cu, MIR* mir)
-{
-  RegLocation res = GetRawDest(cu, mir);
-  DCHECK(res.wide);
-  return res;
-}
-
-RegLocation GetSrcWide(CompilationUnit* cu, MIR* mir,
-                 int low)
-{
-  RegLocation res = GetRawSrc(cu, mir, low);
-  DCHECK(res.wide);
-  return res;
-}
-
 /* USE SSA names to count references of base Dalvik v_regs. */
-static void CountRefs(CompilationUnit *cu, BasicBlock* bb, RefCounts* core_counts,
-                      RefCounts* fp_counts)
+void Mir2Lir::CountRefs(BasicBlock* bb, RefCounts* core_counts,
+                        RefCounts* fp_counts)
 {
   // TUNING: this routine could use some tweaking.
-  if ((cu->disable_opt & (1 << kPromoteRegs)) ||
+  if ((cu_->disable_opt & (1 << kPromoteRegs)) ||
     !((bb->block_type == kEntryBlock) || (bb->block_type == kExitBlock) ||
       (bb->block_type == kDalvikByteCode))) {
     return;
   }
-  for (int i = 0; i < cu->mir_graph->GetNumSSARegs(); i++) {
-    RegLocation loc = cu->reg_location[i];
+  for (int i = 0; i < mir_graph_->GetNumSSARegs(); i++) {
+    RegLocation loc = mir_graph_->reg_location_[i];
     RefCounts* counts = loc.fp ? fp_counts : core_counts;
-    int p_map_idx = SRegToPMap(cu, loc.s_reg_low);
+    int p_map_idx = SRegToPMap(loc.s_reg_low);
     //Don't count easily regenerated immediates
-    if (loc.fp || !IsInexpensiveConstant(cu, loc)) {
-      counts[p_map_idx].count += cu->mir_graph->GetUseCount(i);
+    if (loc.fp || !IsInexpensiveConstant(loc)) {
+      counts[p_map_idx].count += mir_graph_->GetUseCount(i);
     }
     if (loc.wide && loc.fp && !loc.high_word) {
       counts[p_map_idx].double_start = true;
@@ -1106,12 +1026,12 @@
 /* qsort callback function, sort descending */
 static int SortCounts(const void *val1, const void *val2)
 {
-  const RefCounts* op1 = reinterpret_cast<const RefCounts*>(val1);
-  const RefCounts* op2 = reinterpret_cast<const RefCounts*>(val2);
+  const Mir2Lir::RefCounts* op1 = reinterpret_cast<const Mir2Lir::RefCounts*>(val1);
+  const Mir2Lir::RefCounts* op2 = reinterpret_cast<const Mir2Lir::RefCounts*>(val2);
   return (op1->count == op2->count) ? 0 : (op1->count < op2->count ? 1 : -1);
 }
 
-static void DumpCounts(const RefCounts* arr, int size, const char* msg)
+void Mir2Lir::DumpCounts(const RefCounts* arr, int size, const char* msg)
 {
   LOG(INFO) << msg;
   for (int i = 0; i < size; i++) {
@@ -1123,16 +1043,15 @@
  * Note: some portions of this code required even if the kPromoteRegs
  * optimization is disabled.
  */
-void DoPromotion(CompilationUnit* cu)
+void Mir2Lir::DoPromotion()
 {
-  Codegen* cg = cu->cg.get();
-  int reg_bias = cu->num_compiler_temps + 1;
-  int dalvik_regs = cu->num_dalvik_registers;
+  int reg_bias = cu_->num_compiler_temps + 1;
+  int dalvik_regs = cu_->num_dalvik_registers;
   int num_regs = dalvik_regs + reg_bias;
   const int promotion_threshold = 2;
 
   // Allow target code to add any special registers
-  cg->AdjustSpillMask(cu);
+  AdjustSpillMask();
 
   /*
    * Simple register promotion. Just do a static count of the uses
@@ -1145,30 +1064,30 @@
    * TUNING: replace with linear scan once we have the ability
    * to describe register live ranges for GC.
    */
-  RefCounts *core_regs = static_cast<RefCounts*>(NewMem(cu, sizeof(RefCounts) * num_regs,
+  RefCounts *core_regs = static_cast<RefCounts*>(NewMem(cu_, sizeof(RefCounts) * num_regs,
                                                        true, kAllocRegAlloc));
-  RefCounts *FpRegs = static_cast<RefCounts *>(NewMem(cu, sizeof(RefCounts) * num_regs,
+  RefCounts *FpRegs = static_cast<RefCounts *>(NewMem(cu_, sizeof(RefCounts) * num_regs,
                                                       true, kAllocRegAlloc));
   // Set ssa names for original Dalvik registers
   for (int i = 0; i < dalvik_regs; i++) {
     core_regs[i].s_reg = FpRegs[i].s_reg = i;
   }
   // Set ssa name for Method*
-  core_regs[dalvik_regs].s_reg = cu->method_sreg;
-  FpRegs[dalvik_regs].s_reg = cu->method_sreg;  // For consistecy
+  core_regs[dalvik_regs].s_reg = mir_graph_->GetMethodSReg();
+  FpRegs[dalvik_regs].s_reg = mir_graph_->GetMethodSReg();  // For consistency
   // Set ssa names for compiler_temps
-  for (int i = 1; i <= cu->num_compiler_temps; i++) {
-    CompilerTemp* ct = reinterpret_cast<CompilerTemp*>(cu->compiler_temps.elem_list[i]);
+  for (int i = 1; i <= cu_->num_compiler_temps; i++) {
+    CompilerTemp* ct = reinterpret_cast<CompilerTemp*>(mir_graph_->compiler_temps_.elem_list[i]);
     core_regs[dalvik_regs + i].s_reg = ct->s_reg;
     FpRegs[dalvik_regs + i].s_reg = ct->s_reg;
   }
 
-  GrowableListIterator iterator = cu->mir_graph->GetBasicBlockIterator();
+  GrowableListIterator iterator = mir_graph_->GetBasicBlockIterator();
   while (true) {
     BasicBlock* bb;
     bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iterator));
     if (bb == NULL) break;
-    CountRefs(cu, bb, core_regs, FpRegs);
+    CountRefs(bb, core_regs, FpRegs);
   }
 
   /*
@@ -1186,18 +1105,18 @@
   qsort(core_regs, num_regs, sizeof(RefCounts), SortCounts);
   qsort(FpRegs, num_regs, sizeof(RefCounts), SortCounts);
 
-  if (cu->verbose) {
+  if (cu_->verbose) {
     DumpCounts(core_regs, num_regs, "Core regs after sort");
     DumpCounts(FpRegs, num_regs, "Fp regs after sort");
   }
 
-  if (!(cu->disable_opt & (1 << kPromoteRegs))) {
+  if (!(cu_->disable_opt & (1 << kPromoteRegs))) {
     // Promote FpRegs
     for (int i = 0; (i < num_regs) &&
             (FpRegs[i].count >= promotion_threshold ); i++) {
-      int p_map_idx = SRegToPMap(cu, FpRegs[i].s_reg);
-      if (cu->promotion_map[p_map_idx].fp_location != kLocPhysReg) {
-        int reg = AllocPreservedFPReg(cu, FpRegs[i].s_reg,
+      int p_map_idx = SRegToPMap(FpRegs[i].s_reg);
+      if (promotion_map_[p_map_idx].fp_location != kLocPhysReg) {
+        int reg = AllocPreservedFPReg(FpRegs[i].s_reg,
           FpRegs[i].double_start);
         if (reg < 0) {
           break;  // No more left
@@ -1208,10 +1127,10 @@
     // Promote core regs
     for (int i = 0; (i < num_regs) &&
             (core_regs[i].count > promotion_threshold); i++) {
-      int p_map_idx = SRegToPMap(cu, core_regs[i].s_reg);
-      if (cu->promotion_map[p_map_idx].core_location !=
+      int p_map_idx = SRegToPMap(core_regs[i].s_reg);
+      if (promotion_map_[p_map_idx].core_location !=
           kLocPhysReg) {
-        int reg = AllocPreservedCoreReg(cu, core_regs[i].s_reg);
+        int reg = AllocPreservedCoreReg(core_regs[i].s_reg);
         if (reg < 0) {
            break;  // No more left
         }
@@ -1220,20 +1139,20 @@
   }
 
   // Now, update SSA names to new home locations
-  for (int i = 0; i < cu->mir_graph->GetNumSSARegs(); i++) {
-    RegLocation *curr = &cu->reg_location[i];
-    int p_map_idx = SRegToPMap(cu, curr->s_reg_low);
+  for (int i = 0; i < mir_graph_->GetNumSSARegs(); i++) {
+    RegLocation *curr = &mir_graph_->reg_location_[i];
+    int p_map_idx = SRegToPMap(curr->s_reg_low);
     if (!curr->wide) {
       if (curr->fp) {
-        if (cu->promotion_map[p_map_idx].fp_location == kLocPhysReg) {
+        if (promotion_map_[p_map_idx].fp_location == kLocPhysReg) {
           curr->location = kLocPhysReg;
-          curr->low_reg = cu->promotion_map[p_map_idx].FpReg;
+          curr->low_reg = promotion_map_[p_map_idx].FpReg;
           curr->home = true;
         }
       } else {
-        if (cu->promotion_map[p_map_idx].core_location == kLocPhysReg) {
+        if (promotion_map_[p_map_idx].core_location == kLocPhysReg) {
           curr->location = kLocPhysReg;
-          curr->low_reg = cu->promotion_map[p_map_idx].core_reg;
+          curr->low_reg = promotion_map_[p_map_idx].core_reg;
           curr->home = true;
         }
       }
@@ -1243,11 +1162,11 @@
         continue;
       }
       if (curr->fp) {
-        if ((cu->promotion_map[p_map_idx].fp_location == kLocPhysReg) &&
-          (cu->promotion_map[p_map_idx+1].fp_location ==
+        if ((promotion_map_[p_map_idx].fp_location == kLocPhysReg) &&
+          (promotion_map_[p_map_idx+1].fp_location ==
           kLocPhysReg)) {
-          int low_reg = cu->promotion_map[p_map_idx].FpReg;
-          int high_reg = cu->promotion_map[p_map_idx+1].FpReg;
+          int low_reg = promotion_map_[p_map_idx].FpReg;
+          int high_reg = promotion_map_[p_map_idx+1].FpReg;
           // Doubles require pair of singles starting at even reg
           if (((low_reg & 0x1) == 0) && ((low_reg + 1) == high_reg)) {
             curr->location = kLocPhysReg;
@@ -1257,85 +1176,97 @@
           }
         }
       } else {
-        if ((cu->promotion_map[p_map_idx].core_location == kLocPhysReg)
-           && (cu->promotion_map[p_map_idx+1].core_location ==
+        if ((promotion_map_[p_map_idx].core_location == kLocPhysReg)
+           && (promotion_map_[p_map_idx+1].core_location ==
            kLocPhysReg)) {
           curr->location = kLocPhysReg;
-          curr->low_reg = cu->promotion_map[p_map_idx].core_reg;
-          curr->high_reg = cu->promotion_map[p_map_idx+1].core_reg;
+          curr->low_reg = promotion_map_[p_map_idx].core_reg;
+          curr->high_reg = promotion_map_[p_map_idx+1].core_reg;
           curr->home = true;
         }
       }
     }
   }
-  if (cu->verbose) {
-    DumpPromotionMap(cu);
+  if (cu_->verbose) {
+    DumpPromotionMap();
   }
 }
 
 /* Returns sp-relative offset in bytes for a VReg */
-int VRegOffset(CompilationUnit* cu, int v_reg)
+int Mir2Lir::VRegOffset(int v_reg)
 {
-  return StackVisitor::GetVRegOffset(cu->code_item, cu->core_spill_mask,
-                                     cu->fp_spill_mask, cu->frame_size, v_reg);
+  return StackVisitor::GetVRegOffset(cu_->code_item, core_spill_mask_,
+                                     fp_spill_mask_, frame_size_, v_reg);
 }
 
 /* Returns sp-relative offset in bytes for a SReg */
-int SRegOffset(CompilationUnit* cu, int s_reg)
+int Mir2Lir::SRegOffset(int s_reg)
 {
-  return VRegOffset(cu, cu->mir_graph->SRegToVReg(s_reg));
-}
-
-RegLocation GetBadLoc()
-{
-  RegLocation res = bad_loc;
-  return res;
+  return VRegOffset(mir_graph_->SRegToVReg(s_reg));
 }
 
 /* Mark register usage state and return long retloc */
-RegLocation GetReturnWide(CompilationUnit* cu, bool is_double)
+RegLocation Mir2Lir::GetReturnWide(bool is_double)
 {
-  Codegen* cg = cu->cg.get();
-  RegLocation gpr_res = cg->LocCReturnWide();
-  RegLocation fpr_res = cg->LocCReturnDouble();
+  RegLocation gpr_res = LocCReturnWide();
+  RegLocation fpr_res = LocCReturnDouble();
   RegLocation res = is_double ? fpr_res : gpr_res;
-  Clobber(cu, res.low_reg);
-  Clobber(cu, res.high_reg);
-  LockTemp(cu, res.low_reg);
-  LockTemp(cu, res.high_reg);
-  MarkPair(cu, res.low_reg, res.high_reg);
+  Clobber(res.low_reg);
+  Clobber(res.high_reg);
+  LockTemp(res.low_reg);
+  LockTemp(res.high_reg);
+  MarkPair(res.low_reg, res.high_reg);
   return res;
 }
 
-RegLocation GetReturn(CompilationUnit* cu, bool is_float)
+RegLocation Mir2Lir::GetReturn(bool is_float)
 {
-  Codegen* cg = cu->cg.get();
-  RegLocation gpr_res = cg->LocCReturn();
-  RegLocation fpr_res = cg->LocCReturnFloat();
+  RegLocation gpr_res = LocCReturn();
+  RegLocation fpr_res = LocCReturnFloat();
   RegLocation res = is_float ? fpr_res : gpr_res;
-  Clobber(cu, res.low_reg);
-  if (cu->instruction_set == kMips) {
-    MarkInUse(cu, res.low_reg);
+  Clobber(res.low_reg);
+  if (cu_->instruction_set == kMips) {
+    MarkInUse(res.low_reg);
   } else {
-    LockTemp(cu, res.low_reg);
+    LockTemp(res.low_reg);
   }
   return res;
 }
 
-void Codegen::SimpleRegAlloc(CompilationUnit* cu)
+void Mir2Lir::SimpleRegAlloc()
 {
-  DoPromotion(cu);
+  DoPromotion();
 
-  /* Get easily-accessable post-promotion copy of RegLocation for Method* */
-  cu->method_loc = cu->reg_location[cu->method_sreg];
-
-  if (cu->verbose && !(cu->disable_opt & (1 << kPromoteRegs))) {
+  if (cu_->verbose && !(cu_->disable_opt & (1 << kPromoteRegs))) {
     LOG(INFO) << "After Promotion";
-    cu->mir_graph->DumpRegLocTable(cu->reg_location, cu->mir_graph->GetNumSSARegs());
+    mir_graph_->DumpRegLocTable(mir_graph_->reg_location_, mir_graph_->GetNumSSARegs());
   }
 
   /* Set the frame size */
-  cu->frame_size = cu->mir_graph->ComputeFrameSize();
+  frame_size_ = ComputeFrameSize();
+}
+
+/*
+ * Get the "real" sreg number associated with an s_reg slot.  In general,
+ * s_reg values passed through codegen are the SSA names created by
+ * dataflow analysis and refer to slot numbers in the mir_graph_->reg_location
+ * array.  However, renaming is accomplished by simply replacing RegLocation
+ * entries in the reg_location[] array.  Therefore, when location
+ * records for operands are first created, we need to ask the locRecord
+ * identified by the dataflow pass what its new name is.
+ */
+int Mir2Lir::GetSRegHi(int lowSreg) {
+  return (lowSreg == INVALID_SREG) ? INVALID_SREG : lowSreg + 1;
+}
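+
+// Usage sketch (cf. UpdateLocWide above), assuming a wide RegLocation loc:
+//   RegisterInfo* info_hi = AllocLive(GetSRegHi(loc.s_reg_low), kAnyReg);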
+
+bool Mir2Lir::oat_live_out(int s_reg) {
+  // For now, conservatively treat every SSA name as live-out.
+  return true;
+}
+
+int Mir2Lir::oatSSASrc(MIR* mir, int num) {
+  DCHECK_GT(mir->ssa_rep->num_uses, num);
+  return mir->ssa_rep->uses[num];
 }
 
 }  // namespace art
diff --git a/src/compiler/dex/quick/ralloc_util.h b/src/compiler/dex/quick/ralloc_util.h
deleted file mode 100644
index 1f99600..0000000
--- a/src/compiler/dex/quick/ralloc_util.h
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_SRC_COMPILER_DEX_QUICK_RALLOCUTIL_H_
-#define ART_SRC_COMPILER_DEX_QUICK_RALLOCUTIL_H_
-
-/*
- * This file contains target independent register alloction support.
- */
-
-#include "compiler/dex/compiler_ir.h"
-#include "compiler/dex/compiler_utility.h"
-
-namespace art {
-
-/* Static register use counts */
-struct RefCounts {
-  int count;
-  int s_reg;
-  bool double_start;   // Starting v_reg for a double
-};
-
-/*
- * Get the "real" sreg number associated with an s_reg slot.  In general,
- * s_reg values passed through codegen are the SSA names created by
- * dataflow analysis and refer to slot numbers in the cu->reg_location
- * array.  However, renaming is accomplished by simply replacing RegLocation
- * entries in the cu->reglocation[] array.  Therefore, when location
- * records for operands are first created, we need to ask the locRecord
- * identified by the dataflow pass what it's new name is.
- */
-inline int GetSRegHi(int lowSreg) {
-  return (lowSreg == INVALID_SREG) ? INVALID_SREG : lowSreg + 1;
-}
-
-inline bool oat_live_out(CompilationUnit* cu, int s_reg) {
-  //For now.
-  return true;
-}
-
-inline int oatSSASrc(MIR* mir, int num) {
-  DCHECK_GT(mir->ssa_rep->num_uses, num);
-  return mir->ssa_rep->uses[num];
-}
-
-void ClobberSReg(CompilationUnit* cu, int s_reg);
-RegLocation EvalLoc(CompilationUnit* cu, RegLocation loc,
-                              int reg_class, bool update);
-// Mark a temp register as dead.  Does not affect allocation state.
-void Clobber(CompilationUnit* cu, int reg);
-
-RegLocation UpdateLoc(CompilationUnit* cu, RegLocation loc);
-RegLocation UpdateLocWide(CompilationUnit* cu, RegLocation loc);
-RegLocation UpdateRawLoc(CompilationUnit* cu, RegLocation loc);
-
-void MarkLive(CompilationUnit* cu, int reg, int s_reg);
-void MarkTemp(CompilationUnit* cu, int reg);
-void UnmarkTemp(CompilationUnit* cu, int reg);
-void MarkDirty(CompilationUnit* cu, RegLocation loc);
-void MarkPair(CompilationUnit* cu, int low_reg, int high_reg);
-void MarkClean(CompilationUnit* cu, RegLocation loc);
-void ResetDef(CompilationUnit* cu, int reg);
-void ResetDefLoc(CompilationUnit* cu, RegLocation rl);
-
-// Set up temp & preserved register pools specialized by target.
-void CompilerInitPool(RegisterInfo* regs, int* reg_nums, int num);
-
-/*
- * Mark the beginning and end LIR of a def sequence.  Note that
- * on entry start points to the LIR prior to the beginning of the
- * sequence.
- */
-void MarkDef(CompilationUnit* cu, RegLocation rl, LIR* start, LIR* finish);
-void MarkDefWide(CompilationUnit* cu, RegLocation rl, LIR* start, LIR* finish);
-void ResetDefLocWide(CompilationUnit* cu, RegLocation rl);
-void ResetDefTracking(CompilationUnit* cu);
-
-
-// Get the LocRecord associated with an SSA name use.
-RegLocation GetSrc(CompilationUnit* cu, MIR* mir, int num);
-RegLocation GetSrcWide(CompilationUnit* cu, MIR* mir, int low);
-// Non-width checking version.
-RegLocation GetRawSrc(CompilationUnit* cu, MIR* mir, int num);
-
-// Get the LocRecord associated with an SSA name def.
-RegLocation GetDest(CompilationUnit* cu, MIR* mir);
-RegLocation GetDestWide(CompilationUnit* cu, MIR* mir);
-// Non-width checking version.
-RegLocation GetRawDest(CompilationUnit* cu, MIR* mir);
-
-// Clobber all regs that might be used by an external C call.
-void ClobberCalleeSave(CompilationUnit* cu);
-
-RegisterInfo *IsTemp(CompilationUnit* cu, int reg);
-RegisterInfo *IsPromoted(CompilationUnit* cu, int reg);
-RegisterInfo *IsLive(CompilationUnit* cu, int reg);
-bool IsDirty(CompilationUnit* cu, int reg);
-
-void MarkInUse(CompilationUnit* cu, int reg);
-
-int AllocTemp(CompilationUnit* cu);
-int AllocTempFloat(CompilationUnit* cu);
-int AllocTempDouble(CompilationUnit* cu);
-void FreeTemp(CompilationUnit* cu, int reg);
-// Return a temp if one is available, -1 otherwise.
-int AllocFreeTemp(CompilationUnit* cu);
-/*
- * Attempt to allocate a callee-save register.
- * Similar to AllocTemp(), but forces the allocation of a specific
- * register.  No check is made to see if the register was previously
- * allocated.  Use with caution.
- */
-void LockTemp(CompilationUnit* cu, int reg);
-
-/* To be used when explicitly managing register use */
-void LockCallTemps(CompilationUnit* cu);
-void FreeCallTemps(CompilationUnit* cu);
-
-void FlushAllRegs(CompilationUnit* cu);
-
-RegLocation GetReturn(CompilationUnit* cu, bool is_float);
-RegLocation GetReturnWide(CompilationUnit* cu, bool is_double);
-RegLocation GetBadLoc();
-RegLocation WideToNarrow(CompilationUnit* cu, RegLocation rl);
-
-/*
- * Free all allocated temps in the temp pools.  Note that this does
- * not affect the "liveness" of a temp register, which will stay
- * live until it is either explicitly killed or reallocated.
- */
-void ResetRegPool(CompilationUnit* cu);
-
-void ClobberAllRegs(CompilationUnit* cu);
-
-void FlushRegWide(CompilationUnit* cu, int reg1, int reg2);
-
-void FlushReg(CompilationUnit* cu, int reg);
-
-void DoPromotion(CompilationUnit* cu);
-int VRegOffset(CompilationUnit* cu, int reg);
-int SRegOffset(CompilationUnit* cu, int reg);
-void RecordCorePromotion(CompilationUnit* cu, int reg, int s_reg);
-void RecordFpPromotion(CompilationUnit* cu, int reg, int s_reg);
-int SRegToPMap(CompilationUnit* cu, int s_reg);
-void DumpRegPool(RegisterInfo* p, int num_regs);
-
-}  // namespace art
-
-#endif // ART_SRC_COMPILER_DEX_QUICK_RALLOCUTIL_H_
diff --git a/src/compiler/dex/quick/x86/assemble_x86.cc b/src/compiler/dex/quick/x86/assemble_x86.cc
index 2369e49..f7c1594 100644
--- a/src/compiler/dex/quick/x86/assemble_x86.cc
+++ b/src/compiler/dex/quick/x86/assemble_x86.cc
@@ -15,14 +15,13 @@
  */
 
 #include "codegen_x86.h"
-#include "compiler/dex/quick/codegen_util.h"
 #include "x86_lir.h"
 
 namespace art {
 
 #define MAX_ASSEMBLER_RETRIES 50
 
-const X86EncodingMap X86Codegen::EncodingMap[kX86Last] = {
+const X86EncodingMap X86Mir2Lir::EncodingMap[kX86Last] = {
   { kX8632BitData, kData,    IS_UNARY_OP,            { 0, 0, 0x00, 0, 0, 0, 0, 4 }, "data",  "0x!0d" },
   { kX86Bkpt,      kNullary, NO_OPERAND | IS_BRANCH, { 0, 0, 0xCC, 0, 0, 0, 0, 0 }, "int 3", "" },
   { kX86Nop,       kNop,     IS_UNARY_OP,            { 0, 0, 0x90, 0, 0, 0, 0, 0 }, "nop",   "" },
@@ -361,8 +360,8 @@
   return size;
 }
 
-int X86Codegen::GetInsnSize(LIR* lir) {
-  const X86EncodingMap* entry = &X86Codegen::EncodingMap[lir->opcode];
+int X86Mir2Lir::GetInsnSize(LIR* lir) {
+  const X86EncodingMap* entry = &X86Mir2Lir::EncodingMap[lir->opcode];
   switch (entry->kind) {
     case kData:
       return 4;  // 4 bytes of data
@@ -479,7 +478,7 @@
     case kMacro:
       DCHECK_EQ(lir->opcode, static_cast<int>(kX86StartOfMethod));
       return 5 /* call opcode + 4 byte displacement */ + 1 /* pop reg */ +
-          ComputeSize(&X86Codegen::EncodingMap[kX86Sub32RI], 0, 0, false) -
+          ComputeSize(&X86Mir2Lir::EncodingMap[kX86Sub32RI], 0, 0, false) -
           (lir->operands[0] == rAX  ? 1 : 0);  // shorter ax encoding
     default:
       break;
@@ -499,34 +498,34 @@
   }
 }
 
-static void EmitDisp(CompilationUnit* cu, int base, int disp) {
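+// Emits a displacement in the shortest form the encoding allows: omitted when
+// zero (except for an rBP base, which always needs an explicit disp), a single
+// byte when it fits in a signed 8-bit immediate, else four little-endian bytes.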
+void X86Mir2Lir::EmitDisp(int base, int disp) {
   // BP requires an explicit disp, so do not omit it in the 0 case
   if (disp == 0 && base != rBP) {
     return;
   } else if (IS_SIMM8(disp)) {
-    cu->code_buffer.push_back(disp & 0xFF);
+    code_buffer_.push_back(disp & 0xFF);
   } else {
-    cu->code_buffer.push_back(disp & 0xFF);
-    cu->code_buffer.push_back((disp >> 8) & 0xFF);
-    cu->code_buffer.push_back((disp >> 16) & 0xFF);
-    cu->code_buffer.push_back((disp >> 24) & 0xFF);
+    code_buffer_.push_back(disp & 0xFF);
+    code_buffer_.push_back((disp >> 8) & 0xFF);
+    code_buffer_.push_back((disp >> 16) & 0xFF);
+    code_buffer_.push_back((disp >> 24) & 0xFF);
   }
 }
 
-static void EmitOpReg(CompilationUnit* cu, const X86EncodingMap* entry, uint8_t reg) {
+void X86Mir2Lir::EmitOpReg(const X86EncodingMap* entry, uint8_t reg) {
   if (entry->skeleton.prefix1 != 0) {
-    cu->code_buffer.push_back(entry->skeleton.prefix1);
+    code_buffer_.push_back(entry->skeleton.prefix1);
     if (entry->skeleton.prefix2 != 0) {
-      cu->code_buffer.push_back(entry->skeleton.prefix2);
+      code_buffer_.push_back(entry->skeleton.prefix2);
     }
   } else {
     DCHECK_EQ(0, entry->skeleton.prefix2);
   }
-  cu->code_buffer.push_back(entry->skeleton.opcode);
+  code_buffer_.push_back(entry->skeleton.opcode);
   if (entry->skeleton.opcode == 0x0F) {
-    cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
+    code_buffer_.push_back(entry->skeleton.extra_opcode1);
     if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
-      cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
+      code_buffer_.push_back(entry->skeleton.extra_opcode2);
     } else {
       DCHECK_EQ(0, entry->skeleton.extra_opcode2);
     }
@@ -539,51 +538,51 @@
   }
   if (reg >= 4) {
     DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
-        << " in " << PrettyMethod(cu->method_idx, *cu->dex_file);
+        << " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
   }
   DCHECK_LT(reg, 8);
   uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
-  cu->code_buffer.push_back(modrm);
+  code_buffer_.push_back(modrm);
   DCHECK_EQ(0, entry->skeleton.ax_opcode);
   DCHECK_EQ(0, entry->skeleton.immediate_bytes);
 }
 
-static void EmitOpMem(CompilationUnit* cu, const X86EncodingMap* entry, uint8_t base, int disp) {
+void X86Mir2Lir::EmitOpMem(const X86EncodingMap* entry, uint8_t base, int disp) {
   if (entry->skeleton.prefix1 != 0) {
-    cu->code_buffer.push_back(entry->skeleton.prefix1);
+    code_buffer_.push_back(entry->skeleton.prefix1);
     if (entry->skeleton.prefix2 != 0) {
-      cu->code_buffer.push_back(entry->skeleton.prefix2);
+      code_buffer_.push_back(entry->skeleton.prefix2);
     }
   } else {
     DCHECK_EQ(0, entry->skeleton.prefix2);
   }
-  cu->code_buffer.push_back(entry->skeleton.opcode);
+  code_buffer_.push_back(entry->skeleton.opcode);
   DCHECK_EQ(0, entry->skeleton.extra_opcode1);
   DCHECK_EQ(0, entry->skeleton.extra_opcode2);
   DCHECK_LT(entry->skeleton.modrm_opcode, 8);
   DCHECK_LT(base, 8);
   uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (entry->skeleton.modrm_opcode << 3) | base;
-  cu->code_buffer.push_back(modrm);
-  EmitDisp(cu, base, disp);
+  code_buffer_.push_back(modrm);
+  EmitDisp(base, disp);
   DCHECK_EQ(0, entry->skeleton.ax_opcode);
   DCHECK_EQ(0, entry->skeleton.immediate_bytes);
 }
 
-static void EmitMemReg(CompilationUnit* cu, const X86EncodingMap* entry,
+void X86Mir2Lir::EmitMemReg(const X86EncodingMap* entry,
                        uint8_t base, int disp, uint8_t reg) {
   if (entry->skeleton.prefix1 != 0) {
-    cu->code_buffer.push_back(entry->skeleton.prefix1);
+    code_buffer_.push_back(entry->skeleton.prefix1);
     if (entry->skeleton.prefix2 != 0) {
-      cu->code_buffer.push_back(entry->skeleton.prefix2);
+      code_buffer_.push_back(entry->skeleton.prefix2);
     }
   } else {
     DCHECK_EQ(0, entry->skeleton.prefix2);
   }
-  cu->code_buffer.push_back(entry->skeleton.opcode);
+  code_buffer_.push_back(entry->skeleton.opcode);
   if (entry->skeleton.opcode == 0x0F) {
-    cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
+    code_buffer_.push_back(entry->skeleton.extra_opcode1);
     if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
-      cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
+      code_buffer_.push_back(entry->skeleton.extra_opcode2);
     } else {
       DCHECK_EQ(0, entry->skeleton.extra_opcode2);
     }
@@ -596,43 +595,43 @@
   }
   if (reg >= 4) {
     DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
-        << " in " << PrettyMethod(cu->method_idx, *cu->dex_file);
+        << " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
   }
   DCHECK_LT(reg, 8);
   DCHECK_LT(base, 8);
   uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (reg << 3) | base;
-  cu->code_buffer.push_back(modrm);
+  code_buffer_.push_back(modrm);
   if (base == rX86_SP) {
     // Special SIB for SP base
-    cu->code_buffer.push_back(0 << 6 | (rX86_SP << 3) | rX86_SP);
+    code_buffer_.push_back(0 << 6 | (rX86_SP << 3) | rX86_SP);
   }
-  EmitDisp(cu, base, disp);
+  EmitDisp(base, disp);
   DCHECK_EQ(0, entry->skeleton.modrm_opcode);
   DCHECK_EQ(0, entry->skeleton.ax_opcode);
   DCHECK_EQ(0, entry->skeleton.immediate_bytes);
 }
 
-static void EmitRegMem(CompilationUnit* cu, const X86EncodingMap* entry,
+void X86Mir2Lir::EmitRegMem(const X86EncodingMap* entry,
                        uint8_t reg, uint8_t base, int disp) {
   // Opcode will flip operands.
-  EmitMemReg(cu, entry, base, disp, reg);
+  EmitMemReg(entry, base, disp, reg);
 }
 
-static void EmitRegArray(CompilationUnit* cu, const X86EncodingMap* entry, uint8_t reg,
-                         uint8_t base, uint8_t index, int scale, int disp) {
+void X86Mir2Lir::EmitRegArray(const X86EncodingMap* entry, uint8_t reg, uint8_t base, uint8_t index,
+                  int scale, int disp) {
   if (entry->skeleton.prefix1 != 0) {
-    cu->code_buffer.push_back(entry->skeleton.prefix1);
+    code_buffer_.push_back(entry->skeleton.prefix1);
     if (entry->skeleton.prefix2 != 0) {
-      cu->code_buffer.push_back(entry->skeleton.prefix2);
+      code_buffer_.push_back(entry->skeleton.prefix2);
     }
   } else {
     DCHECK_EQ(0, entry->skeleton.prefix2);
   }
-  cu->code_buffer.push_back(entry->skeleton.opcode);
+  code_buffer_.push_back(entry->skeleton.opcode);
   if (entry->skeleton.opcode == 0x0F) {
-    cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
+    code_buffer_.push_back(entry->skeleton.extra_opcode1);
     if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
-      cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
+      code_buffer_.push_back(entry->skeleton.extra_opcode2);
     } else {
       DCHECK_EQ(0, entry->skeleton.extra_opcode2);
     }
@@ -645,36 +644,35 @@
   }
   DCHECK_LT(reg, 8);
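+  // r/m = rX86_SP (0b100) selects SIB addressing; the SIB byte emitted below
+  // carries the actual base/index/scale.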
   uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (reg << 3) | rX86_SP;
-  cu->code_buffer.push_back(modrm);
+  code_buffer_.push_back(modrm);
   DCHECK_LT(scale, 4);
   DCHECK_LT(index, 8);
   DCHECK_LT(base, 8);
   uint8_t sib = (scale << 6) | (index << 3) | base;
-  cu->code_buffer.push_back(sib);
-  EmitDisp(cu, base, disp);
+  code_buffer_.push_back(sib);
+  EmitDisp(base, disp);
   DCHECK_EQ(0, entry->skeleton.modrm_opcode);
   DCHECK_EQ(0, entry->skeleton.ax_opcode);
   DCHECK_EQ(0, entry->skeleton.immediate_bytes);
 }
 
-static void EmitArrayReg(CompilationUnit* cu, const X86EncodingMap* entry,
-                         uint8_t base, uint8_t index, int scale, int disp, uint8_t reg) {
+void X86Mir2Lir::EmitArrayReg(const X86EncodingMap* entry, uint8_t base, uint8_t index, int scale, int disp,
+                  uint8_t reg) {
   // Opcode will flip operands.
-  EmitRegArray(cu, entry, reg, base, index, scale, disp);
+  EmitRegArray(entry, reg, base, index, scale, disp);
 }
 
-static void EmitRegThread(CompilationUnit* cu, const X86EncodingMap* entry,
-                          uint8_t reg, int disp) {
+void X86Mir2Lir::EmitRegThread(const X86EncodingMap* entry, uint8_t reg, int disp) {
   DCHECK_NE(entry->skeleton.prefix1, 0);
-  cu->code_buffer.push_back(entry->skeleton.prefix1);
+  code_buffer_.push_back(entry->skeleton.prefix1);
   if (entry->skeleton.prefix2 != 0) {
-    cu->code_buffer.push_back(entry->skeleton.prefix2);
+    code_buffer_.push_back(entry->skeleton.prefix2);
   }
-  cu->code_buffer.push_back(entry->skeleton.opcode);
+  code_buffer_.push_back(entry->skeleton.opcode);
   if (entry->skeleton.opcode == 0x0F) {
-    cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
+    code_buffer_.push_back(entry->skeleton.extra_opcode1);
     if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
-      cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
+      code_buffer_.push_back(entry->skeleton.extra_opcode2);
     } else {
       DCHECK_EQ(0, entry->skeleton.extra_opcode2);
     }
@@ -687,35 +685,34 @@
   }
   if (reg >= 4) {
     DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
-        << " in " << PrettyMethod(cu->method_idx, *cu->dex_file);
+        << " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
   }
   DCHECK_LT(reg, 8);
   uint8_t modrm = (0 << 6) | (reg << 3) | rBP;
-  cu->code_buffer.push_back(modrm);
-  cu->code_buffer.push_back(disp & 0xFF);
-  cu->code_buffer.push_back((disp >> 8) & 0xFF);
-  cu->code_buffer.push_back((disp >> 16) & 0xFF);
-  cu->code_buffer.push_back((disp >> 24) & 0xFF);
+  code_buffer_.push_back(modrm);
+  code_buffer_.push_back(disp & 0xFF);
+  code_buffer_.push_back((disp >> 8) & 0xFF);
+  code_buffer_.push_back((disp >> 16) & 0xFF);
+  code_buffer_.push_back((disp >> 24) & 0xFF);
   DCHECK_EQ(0, entry->skeleton.modrm_opcode);
   DCHECK_EQ(0, entry->skeleton.ax_opcode);
   DCHECK_EQ(0, entry->skeleton.immediate_bytes);
 }
 
-static void EmitRegReg(CompilationUnit* cu, const X86EncodingMap* entry,
-                       uint8_t reg1, uint8_t reg2) {
+void X86Mir2Lir::EmitRegReg(const X86EncodingMap* entry, uint8_t reg1, uint8_t reg2) {
   if (entry->skeleton.prefix1 != 0) {
-    cu->code_buffer.push_back(entry->skeleton.prefix1);
+    code_buffer_.push_back(entry->skeleton.prefix1);
     if (entry->skeleton.prefix2 != 0) {
-      cu->code_buffer.push_back(entry->skeleton.prefix2);
+      code_buffer_.push_back(entry->skeleton.prefix2);
     }
   } else {
     DCHECK_EQ(0, entry->skeleton.prefix2);
   }
-  cu->code_buffer.push_back(entry->skeleton.opcode);
+  code_buffer_.push_back(entry->skeleton.opcode);
   if (entry->skeleton.opcode == 0x0F) {
-    cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
+    code_buffer_.push_back(entry->skeleton.extra_opcode1);
     if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
-      cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
+      code_buffer_.push_back(entry->skeleton.extra_opcode2);
     } else {
       DCHECK_EQ(0, entry->skeleton.extra_opcode2);
     }
@@ -732,27 +729,27 @@
   DCHECK_LT(reg1, 8);
   DCHECK_LT(reg2, 8);
   uint8_t modrm = (3 << 6) | (reg1 << 3) | reg2;
-  cu->code_buffer.push_back(modrm);
+  code_buffer_.push_back(modrm);
   DCHECK_EQ(0, entry->skeleton.modrm_opcode);
   DCHECK_EQ(0, entry->skeleton.ax_opcode);
   DCHECK_EQ(0, entry->skeleton.immediate_bytes);
 }
 
-static void EmitRegRegImm(CompilationUnit* cu, const X86EncodingMap* entry,
+void X86Mir2Lir::EmitRegRegImm(const X86EncodingMap* entry,
                           uint8_t reg1, uint8_t reg2, int32_t imm) {
   if (entry->skeleton.prefix1 != 0) {
-    cu->code_buffer.push_back(entry->skeleton.prefix1);
+    code_buffer_.push_back(entry->skeleton.prefix1);
     if (entry->skeleton.prefix2 != 0) {
-      cu->code_buffer.push_back(entry->skeleton.prefix2);
+      code_buffer_.push_back(entry->skeleton.prefix2);
     }
   } else {
     DCHECK_EQ(0, entry->skeleton.prefix2);
   }
-  cu->code_buffer.push_back(entry->skeleton.opcode);
+  code_buffer_.push_back(entry->skeleton.opcode);
   if (entry->skeleton.opcode == 0x0F) {
-    cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
+    code_buffer_.push_back(entry->skeleton.extra_opcode1);
     if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
-      cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
+      code_buffer_.push_back(entry->skeleton.extra_opcode2);
     } else {
       DCHECK_EQ(0, entry->skeleton.extra_opcode2);
     }
@@ -769,24 +766,24 @@
   DCHECK_LT(reg1, 8);
   DCHECK_LT(reg2, 8);
   uint8_t modrm = (3 << 6) | (reg1 << 3) | reg2;
-  cu->code_buffer.push_back(modrm);
+  code_buffer_.push_back(modrm);
   DCHECK_EQ(0, entry->skeleton.modrm_opcode);
   DCHECK_EQ(0, entry->skeleton.ax_opcode);
   switch (entry->skeleton.immediate_bytes) {
     case 1:
       DCHECK(IS_SIMM8(imm));
-      cu->code_buffer.push_back(imm & 0xFF);
+      code_buffer_.push_back(imm & 0xFF);
       break;
     case 2:
       DCHECK(IS_SIMM16(imm));
-      cu->code_buffer.push_back(imm & 0xFF);
-      cu->code_buffer.push_back((imm >> 8) & 0xFF);
+      code_buffer_.push_back(imm & 0xFF);
+      code_buffer_.push_back((imm >> 8) & 0xFF);
       break;
     case 4:
-      cu->code_buffer.push_back(imm & 0xFF);
-      cu->code_buffer.push_back((imm >> 8) & 0xFF);
-      cu->code_buffer.push_back((imm >> 16) & 0xFF);
-      cu->code_buffer.push_back((imm >> 24) & 0xFF);
+      code_buffer_.push_back(imm & 0xFF);
+      code_buffer_.push_back((imm >> 8) & 0xFF);
+      code_buffer_.push_back((imm >> 16) & 0xFF);
+      code_buffer_.push_back((imm >> 24) & 0xFF);
       break;
     default:
       LOG(FATAL) << "Unexpected immediate bytes (" << entry->skeleton.immediate_bytes
@@ -795,24 +792,23 @@
   }
 }
 
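The 1/2/4-byte immediate cases above all append bytes least-significant
first, since x86 immediates are little-endian. The same pattern as a loop,
assuming only a std::vector code buffer (PushImm is our name):

    #include <cstdint>
    #include <vector>

    // Append 'imm' to 'buf' as 'bytes' little-endian bytes, mirroring the
    // switch on skeleton.immediate_bytes above.
    static void PushImm(std::vector<uint8_t>& buf, int32_t imm, int bytes) {
      for (int i = 0; i < bytes; ++i) {
        buf.push_back(static_cast<uint8_t>((imm >> (8 * i)) & 0xFF));
      }
    }
    // PushImm(buf, 0x12345678, 4) appends: 78 56 34 12
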
-static void EmitRegImm(CompilationUnit* cu, const X86EncodingMap* entry,
-                       uint8_t reg, int imm) {
+void X86Mir2Lir::EmitRegImm(const X86EncodingMap* entry, uint8_t reg, int imm) {
   if (entry->skeleton.prefix1 != 0) {
-    cu->code_buffer.push_back(entry->skeleton.prefix1);
+    code_buffer_.push_back(entry->skeleton.prefix1);
     if (entry->skeleton.prefix2 != 0) {
-      cu->code_buffer.push_back(entry->skeleton.prefix2);
+      code_buffer_.push_back(entry->skeleton.prefix2);
     }
   } else {
     DCHECK_EQ(0, entry->skeleton.prefix2);
   }
   if (reg == rAX && entry->skeleton.ax_opcode != 0) {
-    cu->code_buffer.push_back(entry->skeleton.ax_opcode);
+    code_buffer_.push_back(entry->skeleton.ax_opcode);
   } else {
-    cu->code_buffer.push_back(entry->skeleton.opcode);
+    code_buffer_.push_back(entry->skeleton.opcode);
     if (entry->skeleton.opcode == 0x0F) {
-      cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
+      code_buffer_.push_back(entry->skeleton.extra_opcode1);
       if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
-        cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
+        code_buffer_.push_back(entry->skeleton.extra_opcode2);
       } else {
         DCHECK_EQ(0, entry->skeleton.extra_opcode2);
       }
@@ -824,23 +820,23 @@
       reg = reg & X86_FP_REG_MASK;
     }
     uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
-    cu->code_buffer.push_back(modrm);
+    code_buffer_.push_back(modrm);
   }
   switch (entry->skeleton.immediate_bytes) {
     case 1:
       DCHECK(IS_SIMM8(imm));
-      cu->code_buffer.push_back(imm & 0xFF);
+      code_buffer_.push_back(imm & 0xFF);
       break;
     case 2:
       DCHECK(IS_SIMM16(imm));
-      cu->code_buffer.push_back(imm & 0xFF);
-      cu->code_buffer.push_back((imm >> 8) & 0xFF);
+      code_buffer_.push_back(imm & 0xFF);
+      code_buffer_.push_back((imm >> 8) & 0xFF);
       break;
     case 4:
-      cu->code_buffer.push_back(imm & 0xFF);
-      cu->code_buffer.push_back((imm >> 8) & 0xFF);
-      cu->code_buffer.push_back((imm >> 16) & 0xFF);
-      cu->code_buffer.push_back((imm >> 24) & 0xFF);
+      code_buffer_.push_back(imm & 0xFF);
+      code_buffer_.push_back((imm >> 8) & 0xFF);
+      code_buffer_.push_back((imm >> 16) & 0xFF);
+      code_buffer_.push_back((imm >> 24) & 0xFF);
       break;
     default:
       LOG(FATAL) << "Unexpected immediate bytes (" << entry->skeleton.immediate_bytes
@@ -849,21 +845,20 @@
   }
 }
 
-static void EmitThreadImm(CompilationUnit* cu, const X86EncodingMap* entry,
-                          int disp, int imm) {
+void X86Mir2Lir::EmitThreadImm(const X86EncodingMap* entry, int disp, int imm) {
   if (entry->skeleton.prefix1 != 0) {
-    cu->code_buffer.push_back(entry->skeleton.prefix1);
+    code_buffer_.push_back(entry->skeleton.prefix1);
     if (entry->skeleton.prefix2 != 0) {
-      cu->code_buffer.push_back(entry->skeleton.prefix2);
+      code_buffer_.push_back(entry->skeleton.prefix2);
     }
   } else {
     DCHECK_EQ(0, entry->skeleton.prefix2);
   }
-  cu->code_buffer.push_back(entry->skeleton.opcode);
+  code_buffer_.push_back(entry->skeleton.opcode);
   if (entry->skeleton.opcode == 0x0F) {
-    cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
+    code_buffer_.push_back(entry->skeleton.extra_opcode1);
     if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
-      cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
+      code_buffer_.push_back(entry->skeleton.extra_opcode2);
     } else {
       DCHECK_EQ(0, entry->skeleton.extra_opcode2);
     }
@@ -872,26 +867,26 @@
     DCHECK_EQ(0, entry->skeleton.extra_opcode2);
   }
   uint8_t modrm = (0 << 6) | (entry->skeleton.modrm_opcode << 3) | rBP;
-  cu->code_buffer.push_back(modrm);
-  cu->code_buffer.push_back(disp & 0xFF);
-  cu->code_buffer.push_back((disp >> 8) & 0xFF);
-  cu->code_buffer.push_back((disp >> 16) & 0xFF);
-  cu->code_buffer.push_back((disp >> 24) & 0xFF);
+  code_buffer_.push_back(modrm);
+  code_buffer_.push_back(disp & 0xFF);
+  code_buffer_.push_back((disp >> 8) & 0xFF);
+  code_buffer_.push_back((disp >> 16) & 0xFF);
+  code_buffer_.push_back((disp >> 24) & 0xFF);
   switch (entry->skeleton.immediate_bytes) {
     case 1:
       DCHECK(IS_SIMM8(imm));
-      cu->code_buffer.push_back(imm & 0xFF);
+      code_buffer_.push_back(imm & 0xFF);
       break;
     case 2:
       DCHECK(IS_SIMM16(imm));
-      cu->code_buffer.push_back(imm & 0xFF);
-      cu->code_buffer.push_back((imm >> 8) & 0xFF);
+      code_buffer_.push_back(imm & 0xFF);
+      code_buffer_.push_back((imm >> 8) & 0xFF);
       break;
     case 4:
-      cu->code_buffer.push_back(imm & 0xFF);
-      cu->code_buffer.push_back((imm >> 8) & 0xFF);
-      cu->code_buffer.push_back((imm >> 16) & 0xFF);
-      cu->code_buffer.push_back((imm >> 24) & 0xFF);
+      code_buffer_.push_back(imm & 0xFF);
+      code_buffer_.push_back((imm >> 8) & 0xFF);
+      code_buffer_.push_back((imm >> 16) & 0xFF);
+      code_buffer_.push_back((imm >> 24) & 0xFF);
       break;
     default:
       LOG(FATAL) << "Unexpected immediate bytes (" << entry->skeleton.immediate_bytes
@@ -901,36 +896,34 @@
   DCHECK_EQ(entry->skeleton.ax_opcode, 0);
 }
 
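A note on the (0 << 6) | ... | rBP ModRM just above: in 32-bit mode,
mod == 00 with rm == rBP (101) does not mean [ebp]; it encodes a bare
32-bit displacement with no base register. Combined with the segment
override these thread forms carry in skeleton.prefix1 (the fs prefix,
0x64, on 32-bit x86 here), that displacement addresses a Thread field.
A small sketch of the decode rule (the helper name is ours):

    #include <cstdint>

    // With mod == 0, rm == 5 selects [disp32] and no base register; a
    // segment prefix then rebases that displacement.
    static bool IsDisp32Only(uint8_t modrm) {
      return ((modrm >> 6) == 0) && ((modrm & 7) == 5);
    }
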
-static void EmitMovRegImm(CompilationUnit* cu, const X86EncodingMap* entry,
-                       uint8_t reg, int imm) {
+void X86Mir2Lir::EmitMovRegImm(const X86EncodingMap* entry, uint8_t reg, int imm) {
   DCHECK_LT(reg, 8);
-  cu->code_buffer.push_back(0xB8 + reg);
-  cu->code_buffer.push_back(imm & 0xFF);
-  cu->code_buffer.push_back((imm >> 8) & 0xFF);
-  cu->code_buffer.push_back((imm >> 16) & 0xFF);
-  cu->code_buffer.push_back((imm >> 24) & 0xFF);
+  code_buffer_.push_back(0xB8 + reg);
+  code_buffer_.push_back(imm & 0xFF);
+  code_buffer_.push_back((imm >> 8) & 0xFF);
+  code_buffer_.push_back((imm >> 16) & 0xFF);
+  code_buffer_.push_back((imm >> 24) & 0xFF);
 }
 
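EmitMovRegImm above uses the one-byte mov r32, imm32 form: the register
number is folded into the opcode itself (0xB8 + reg) and no ModRM byte is
needed. The same encoding as a standalone sketch (EmitMov32 is our name):

    #include <cstdint>
    #include <vector>

    // mov r32, imm32: opcode B8+rd, then a 4-byte little-endian immediate.
    static void EmitMov32(std::vector<uint8_t>& buf, uint8_t reg, int32_t imm) {
      buf.push_back(static_cast<uint8_t>(0xB8 + (reg & 7)));
      for (int i = 0; i < 4; ++i) {
        buf.push_back(static_cast<uint8_t>((imm >> (8 * i)) & 0xFF));
      }
    }
    // EmitMov32(buf, 0 /* eax */, 1) appends: b8 01 00 00 00
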
-static void EmitShiftRegImm(CompilationUnit* cu, const X86EncodingMap* entry,
-                            uint8_t reg, int imm) {
+void X86Mir2Lir::EmitShiftRegImm(const X86EncodingMap* entry, uint8_t reg, int imm) {
   if (entry->skeleton.prefix1 != 0) {
-    cu->code_buffer.push_back(entry->skeleton.prefix1);
+    code_buffer_.push_back(entry->skeleton.prefix1);
     if (entry->skeleton.prefix2 != 0) {
-      cu->code_buffer.push_back(entry->skeleton.prefix2);
+      code_buffer_.push_back(entry->skeleton.prefix2);
     }
   } else {
     DCHECK_EQ(0, entry->skeleton.prefix2);
   }
   if (imm != 1) {
-    cu->code_buffer.push_back(entry->skeleton.opcode);
+    code_buffer_.push_back(entry->skeleton.opcode);
   } else {
     // Shorter encoding for a shift by 1
-    cu->code_buffer.push_back(entry->skeleton.ax_opcode);
+    code_buffer_.push_back(entry->skeleton.ax_opcode);
   }
   if (entry->skeleton.opcode == 0x0F) {
-    cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
+    code_buffer_.push_back(entry->skeleton.extra_opcode1);
     if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
-      cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
+      code_buffer_.push_back(entry->skeleton.extra_opcode2);
     } else {
       DCHECK_EQ(0, entry->skeleton.extra_opcode2);
     }
@@ -940,115 +933,111 @@
   }
   if (reg >= 4) {
     DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
-        << " in " << PrettyMethod(cu->method_idx, *cu->dex_file);
+        << " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
   }
   DCHECK_LT(reg, 8);
   uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
-  cu->code_buffer.push_back(modrm);
+  code_buffer_.push_back(modrm);
   if (imm != 1) {
     DCHECK_EQ(entry->skeleton.immediate_bytes, 1);
     DCHECK(IS_SIMM8(imm));
-    cu->code_buffer.push_back(imm & 0xFF);
+    code_buffer_.push_back(imm & 0xFF);
   }
 }
 
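The imm != 1 test in EmitShiftRegImm exploits a dedicated x86 short form:
shifts by exactly 1 have their own opcode (e.g. D1 /4 for shl r/m32, 1),
one byte shorter than the general C1 /4 ib form, which is why the entry's
ax_opcode slot is reused to hold it. A sketch of the two forms for shl
specifically (EmitShl32 is our name):

    #include <cstdint>
    #include <vector>

    // shl r/m32, imm8 -> C1 /4 ib (3 bytes); shl r/m32, 1 -> D1 /4 (2 bytes).
    static void EmitShl32(std::vector<uint8_t>& buf, uint8_t reg, int imm) {
      uint8_t modrm = static_cast<uint8_t>((3 << 6) | (4 << 3) | (reg & 7));
      if (imm == 1) {
        buf.push_back(0xD1);
        buf.push_back(modrm);
      } else {
        buf.push_back(0xC1);
        buf.push_back(modrm);
        buf.push_back(static_cast<uint8_t>(imm & 0xFF));
      }
    }
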
-static void EmitShiftRegCl(CompilationUnit* cu, const X86EncodingMap* entry,
-                           uint8_t reg, uint8_t cl) {
+void X86Mir2Lir::EmitShiftRegCl(const X86EncodingMap* entry, uint8_t reg, uint8_t cl) {
   DCHECK_EQ(cl, static_cast<uint8_t>(rCX));
   if (entry->skeleton.prefix1 != 0) {
-    cu->code_buffer.push_back(entry->skeleton.prefix1);
+    code_buffer_.push_back(entry->skeleton.prefix1);
     if (entry->skeleton.prefix2 != 0) {
-      cu->code_buffer.push_back(entry->skeleton.prefix2);
+      code_buffer_.push_back(entry->skeleton.prefix2);
     }
   } else {
     DCHECK_EQ(0, entry->skeleton.prefix2);
   }
-  cu->code_buffer.push_back(entry->skeleton.opcode);
+  code_buffer_.push_back(entry->skeleton.opcode);
   DCHECK_EQ(0, entry->skeleton.extra_opcode1);
   DCHECK_EQ(0, entry->skeleton.extra_opcode2);
   DCHECK_LT(reg, 8);
   uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
-  cu->code_buffer.push_back(modrm);
+  code_buffer_.push_back(modrm);
   DCHECK_EQ(0, entry->skeleton.ax_opcode);
   DCHECK_EQ(0, entry->skeleton.immediate_bytes);
 }
 
-static void EmitRegCond(CompilationUnit* cu, const X86EncodingMap* entry,
-                       uint8_t reg, uint8_t condition) {
+void X86Mir2Lir::EmitRegCond(const X86EncodingMap* entry, uint8_t reg, uint8_t condition) {
   if (entry->skeleton.prefix1 != 0) {
-    cu->code_buffer.push_back(entry->skeleton.prefix1);
+    code_buffer_.push_back(entry->skeleton.prefix1);
     if (entry->skeleton.prefix2 != 0) {
-      cu->code_buffer.push_back(entry->skeleton.prefix2);
+      code_buffer_.push_back(entry->skeleton.prefix2);
     }
   } else {
     DCHECK_EQ(0, entry->skeleton.prefix2);
   }
   DCHECK_EQ(0, entry->skeleton.ax_opcode);
   DCHECK_EQ(0x0F, entry->skeleton.opcode);
-  cu->code_buffer.push_back(0x0F);
+  code_buffer_.push_back(0x0F);
   DCHECK_EQ(0x90, entry->skeleton.extra_opcode1);
-  cu->code_buffer.push_back(0x90 | condition);
+  code_buffer_.push_back(0x90 | condition);
   DCHECK_EQ(0, entry->skeleton.extra_opcode2);
   DCHECK_LT(reg, 8);
   uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
-  cu->code_buffer.push_back(modrm);
+  code_buffer_.push_back(modrm);
   DCHECK_EQ(entry->skeleton.immediate_bytes, 0);
 }
 
-static void EmitJmp(CompilationUnit* cu, const X86EncodingMap* entry, int rel) {
+void X86Mir2Lir::EmitJmp(const X86EncodingMap* entry, int rel) {
   if (entry->opcode == kX86Jmp8) {
     DCHECK(IS_SIMM8(rel));
-    cu->code_buffer.push_back(0xEB);
-    cu->code_buffer.push_back(rel & 0xFF);
+    code_buffer_.push_back(0xEB);
+    code_buffer_.push_back(rel & 0xFF);
   } else if (entry->opcode == kX86Jmp32) {
-    cu->code_buffer.push_back(0xE9);
-    cu->code_buffer.push_back(rel & 0xFF);
-    cu->code_buffer.push_back((rel >> 8) & 0xFF);
-    cu->code_buffer.push_back((rel >> 16) & 0xFF);
-    cu->code_buffer.push_back((rel >> 24) & 0xFF);
+    code_buffer_.push_back(0xE9);
+    code_buffer_.push_back(rel & 0xFF);
+    code_buffer_.push_back((rel >> 8) & 0xFF);
+    code_buffer_.push_back((rel >> 16) & 0xFF);
+    code_buffer_.push_back((rel >> 24) & 0xFF);
   } else {
     DCHECK(entry->opcode == kX86JmpR);
-    cu->code_buffer.push_back(entry->skeleton.opcode);
+    code_buffer_.push_back(entry->skeleton.opcode);
     uint8_t reg = static_cast<uint8_t>(rel);
     DCHECK_LT(reg, 8);
     uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
-    cu->code_buffer.push_back(modrm);
+    code_buffer_.push_back(modrm);
   }
 }
 
-static void EmitJcc(CompilationUnit* cu, const X86EncodingMap* entry,
-                    int rel, uint8_t cc) {
+void X86Mir2Lir::EmitJcc(const X86EncodingMap* entry, int rel, uint8_t cc) {
   DCHECK_LT(cc, 16);
   if (entry->opcode == kX86Jcc8) {
     DCHECK(IS_SIMM8(rel));
-    cu->code_buffer.push_back(0x70 | cc);
-    cu->code_buffer.push_back(rel & 0xFF);
+    code_buffer_.push_back(0x70 | cc);
+    code_buffer_.push_back(rel & 0xFF);
   } else {
     DCHECK(entry->opcode == kX86Jcc32);
-    cu->code_buffer.push_back(0x0F);
-    cu->code_buffer.push_back(0x80 | cc);
-    cu->code_buffer.push_back(rel & 0xFF);
-    cu->code_buffer.push_back((rel >> 8) & 0xFF);
-    cu->code_buffer.push_back((rel >> 16) & 0xFF);
-    cu->code_buffer.push_back((rel >> 24) & 0xFF);
+    code_buffer_.push_back(0x0F);
+    code_buffer_.push_back(0x80 | cc);
+    code_buffer_.push_back(rel & 0xFF);
+    code_buffer_.push_back((rel >> 8) & 0xFF);
+    code_buffer_.push_back((rel >> 16) & 0xFF);
+    code_buffer_.push_back((rel >> 24) & 0xFF);
   }
 }
 
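EmitJmp and EmitJcc above cover the four relative-branch encodings this
assembler relaxes between: EB rel8 / E9 rel32 for unconditional jumps and
70+cc rel8 / 0F 80+cc rel32 for conditional ones. The short forms only
reach [-128, 127], which is what drives the retry logic further down. A
sketch of the resulting sizes (BranchSize is our name):

    // Byte size of each branch form, matching the bytes pushed above.
    static int BranchSize(bool conditional, bool short_form) {
      if (conditional) {
        return short_form ? 2   // 70+cc rel8
                          : 6;  // 0F 80+cc rel32
      }
      return short_form ? 2     // EB rel8
                        : 5;    // E9 rel32
    }
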
-static void EmitCallMem(CompilationUnit* cu, const X86EncodingMap* entry,
-                        uint8_t base, int disp) {
+void X86Mir2Lir::EmitCallMem(const X86EncodingMap* entry, uint8_t base, int disp) {
   if (entry->skeleton.prefix1 != 0) {
-    cu->code_buffer.push_back(entry->skeleton.prefix1);
+    code_buffer_.push_back(entry->skeleton.prefix1);
     if (entry->skeleton.prefix2 != 0) {
-      cu->code_buffer.push_back(entry->skeleton.prefix2);
+      code_buffer_.push_back(entry->skeleton.prefix2);
     }
   } else {
     DCHECK_EQ(0, entry->skeleton.prefix2);
   }
-  cu->code_buffer.push_back(entry->skeleton.opcode);
+  code_buffer_.push_back(entry->skeleton.opcode);
   if (entry->skeleton.opcode == 0x0F) {
-    cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
+    code_buffer_.push_back(entry->skeleton.extra_opcode1);
     if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
-      cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
+      code_buffer_.push_back(entry->skeleton.extra_opcode2);
     } else {
       DCHECK_EQ(0, entry->skeleton.extra_opcode2);
     }
@@ -1057,27 +1046,27 @@
     DCHECK_EQ(0, entry->skeleton.extra_opcode2);
   }
   uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (entry->skeleton.modrm_opcode << 3) | base;
-  cu->code_buffer.push_back(modrm);
+  code_buffer_.push_back(modrm);
   if (base == rX86_SP) {
     // Special SIB for SP base
-    cu->code_buffer.push_back(0 << 6 | (rX86_SP << 3) | rX86_SP);
+    code_buffer_.push_back(0 << 6 | (rX86_SP << 3) | rX86_SP);
   }
-  EmitDisp(cu, base, disp);
+  EmitDisp(base, disp);
   DCHECK_EQ(0, entry->skeleton.ax_opcode);
   DCHECK_EQ(0, entry->skeleton.immediate_bytes);
 }
 
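The "Special SIB for SP base" case above reflects another ModRM decode
rule: rm == 100 (esp) does not name a base register directly but announces
a following SIB byte, so a plain [esp + disp] access needs the degenerate
SIB with scale 0, index 100 (meaning "no index"), and base esp. A sketch
of that rule (PushBaseModRM is our name):

    #include <cstdint>
    #include <vector>

    // Append ModRM (plus SIB when base is esp) for a [base + disp] form.
    static void PushBaseModRM(std::vector<uint8_t>& buf, uint8_t mod,
                              uint8_t reg_op, uint8_t base) {
      buf.push_back(static_cast<uint8_t>((mod << 6) | (reg_op << 3) | base));
      if (base == 4) {  // rm == 100: a SIB byte must follow
        buf.push_back(static_cast<uint8_t>((0 << 6) | (4 << 3) | 4));
      }
    }
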
-static void EmitCallThread(CompilationUnit* cu, const X86EncodingMap* entry, int disp) {
+void X86Mir2Lir::EmitCallThread(const X86EncodingMap* entry, int disp) {
   DCHECK_NE(entry->skeleton.prefix1, 0);
-  cu->code_buffer.push_back(entry->skeleton.prefix1);
+  code_buffer_.push_back(entry->skeleton.prefix1);
   if (entry->skeleton.prefix2 != 0) {
-    cu->code_buffer.push_back(entry->skeleton.prefix2);
+    code_buffer_.push_back(entry->skeleton.prefix2);
   }
-  cu->code_buffer.push_back(entry->skeleton.opcode);
+  code_buffer_.push_back(entry->skeleton.opcode);
   if (entry->skeleton.opcode == 0x0F) {
-    cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
+    code_buffer_.push_back(entry->skeleton.extra_opcode1);
     if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
-      cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
+      code_buffer_.push_back(entry->skeleton.extra_opcode2);
     } else {
       DCHECK_EQ(0, entry->skeleton.extra_opcode2);
     }
@@ -1086,30 +1075,30 @@
     DCHECK_EQ(0, entry->skeleton.extra_opcode2);
   }
   uint8_t modrm = (0 << 6) | (entry->skeleton.modrm_opcode << 3) | rBP;
-  cu->code_buffer.push_back(modrm);
-  cu->code_buffer.push_back(disp & 0xFF);
-  cu->code_buffer.push_back((disp >> 8) & 0xFF);
-  cu->code_buffer.push_back((disp >> 16) & 0xFF);
-  cu->code_buffer.push_back((disp >> 24) & 0xFF);
+  code_buffer_.push_back(modrm);
+  code_buffer_.push_back(disp & 0xFF);
+  code_buffer_.push_back((disp >> 8) & 0xFF);
+  code_buffer_.push_back((disp >> 16) & 0xFF);
+  code_buffer_.push_back((disp >> 24) & 0xFF);
   DCHECK_EQ(0, entry->skeleton.ax_opcode);
   DCHECK_EQ(0, entry->skeleton.immediate_bytes);
 }
 
-static void EmitPcRel(CompilationUnit* cu, const X86EncodingMap* entry, uint8_t reg,
+void X86Mir2Lir::EmitPcRel(const X86EncodingMap* entry, uint8_t reg,
                       int base_or_table, uint8_t index, int scale, int table_or_disp) {
   int disp;
   if (entry->opcode == kX86PcRelLoadRA) {
-    Codegen::SwitchTable *tab_rec = reinterpret_cast<Codegen::SwitchTable*>(table_or_disp);
+    Mir2Lir::SwitchTable *tab_rec = reinterpret_cast<Mir2Lir::SwitchTable*>(table_or_disp);
     disp = tab_rec->offset;
   } else {
     DCHECK(entry->opcode == kX86PcRelAdr);
-    Codegen::FillArrayData *tab_rec = reinterpret_cast<Codegen::FillArrayData*>(base_or_table);
+    Mir2Lir::FillArrayData *tab_rec = reinterpret_cast<Mir2Lir::FillArrayData*>(base_or_table);
     disp = tab_rec->offset;
   }
   if (entry->skeleton.prefix1 != 0) {
-    cu->code_buffer.push_back(entry->skeleton.prefix1);
+    code_buffer_.push_back(entry->skeleton.prefix1);
     if (entry->skeleton.prefix2 != 0) {
-      cu->code_buffer.push_back(entry->skeleton.prefix2);
+      code_buffer_.push_back(entry->skeleton.prefix2);
     }
   } else {
     DCHECK_EQ(0, entry->skeleton.prefix2);
@@ -1119,50 +1108,48 @@
   }
   DCHECK_LT(reg, 8);
   if (entry->opcode == kX86PcRelLoadRA) {
-    cu->code_buffer.push_back(entry->skeleton.opcode);
+    code_buffer_.push_back(entry->skeleton.opcode);
     DCHECK_EQ(0, entry->skeleton.extra_opcode1);
     DCHECK_EQ(0, entry->skeleton.extra_opcode2);
     uint8_t modrm = (2 << 6) | (reg << 3) | rX86_SP;
-    cu->code_buffer.push_back(modrm);
+    code_buffer_.push_back(modrm);
     DCHECK_LT(scale, 4);
     DCHECK_LT(index, 8);
     DCHECK_LT(base_or_table, 8);
     uint8_t base = static_cast<uint8_t>(base_or_table);
     uint8_t sib = (scale << 6) | (index << 3) | base;
-    cu->code_buffer.push_back(sib);
+    code_buffer_.push_back(sib);
     DCHECK_EQ(0, entry->skeleton.immediate_bytes);
   } else {
-    cu->code_buffer.push_back(entry->skeleton.opcode + reg);
+    code_buffer_.push_back(entry->skeleton.opcode + reg);
   }
-  cu->code_buffer.push_back(disp & 0xFF);
-  cu->code_buffer.push_back((disp >> 8) & 0xFF);
-  cu->code_buffer.push_back((disp >> 16) & 0xFF);
-  cu->code_buffer.push_back((disp >> 24) & 0xFF);
+  code_buffer_.push_back(disp & 0xFF);
+  code_buffer_.push_back((disp >> 8) & 0xFF);
+  code_buffer_.push_back((disp >> 16) & 0xFF);
+  code_buffer_.push_back((disp >> 24) & 0xFF);
   DCHECK_EQ(0, entry->skeleton.modrm_opcode);
   DCHECK_EQ(0, entry->skeleton.ax_opcode);
 }
 
-static void EmitMacro(CompilationUnit* cu, const X86EncodingMap* entry,
-                      uint8_t reg, int offset) {
+void X86Mir2Lir::EmitMacro(const X86EncodingMap* entry, uint8_t reg, int offset) {
   DCHECK(entry->opcode == kX86StartOfMethod) << entry->name;
-  cu->code_buffer.push_back(0xE8);  // call +0
-  cu->code_buffer.push_back(0);
-  cu->code_buffer.push_back(0);
-  cu->code_buffer.push_back(0);
-  cu->code_buffer.push_back(0);
+  code_buffer_.push_back(0xE8);  // call +0
+  code_buffer_.push_back(0);
+  code_buffer_.push_back(0);
+  code_buffer_.push_back(0);
+  code_buffer_.push_back(0);
 
   DCHECK_LT(reg, 8);
-  cu->code_buffer.push_back(0x58 + reg);  // pop reg
+  code_buffer_.push_back(0x58 + reg);  // pop reg
 
-  EmitRegImm(cu, &X86Codegen::EncodingMap[kX86Sub32RI], reg, offset + 5 /* size of call +0 */);
+  EmitRegImm(&X86Mir2Lir::EncodingMap[kX86Sub32RI], reg, offset + 5 /* size of call +0 */);
 }
 
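The kX86StartOfMethod expansion above is the classic 32-bit trick for
reading the program counter: call +0 pushes the address of the next
instruction, pop reg retrieves it, and the following sub of offset + 5
(the size of the call) recovers the start-of-method address. The byte
sequence in isolation (EmitGetPc is our name):

    #include <cstdint>
    #include <vector>

    // E8 00 00 00 00  call +0  ; pushes the address of the following pop
    // 58+reg          pop reg  ; reg now holds that address
    static void EmitGetPc(std::vector<uint8_t>& buf, uint8_t reg) {
      const uint8_t call_plus_zero[] = {0xE8, 0x00, 0x00, 0x00, 0x00};
      buf.insert(buf.end(), call_plus_zero, call_plus_zero + 5);
      buf.push_back(static_cast<uint8_t>(0x58 + (reg & 7)));
    }
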
-static void EmitUnimplemented(CompilationUnit* cu, const X86EncodingMap* entry, LIR* lir) {
-  Codegen* cg = cu->cg.get();
+void X86Mir2Lir::EmitUnimplemented(const X86EncodingMap* entry, LIR* lir) {
   UNIMPLEMENTED(WARNING) << "encoding kind for " << entry->name << " "
-                         << cg->BuildInsnString(entry->fmt, lir, 0);
-  for (int i = 0; i < cg->GetInsnSize(lir); ++i) {
-    cu->code_buffer.push_back(0xCC);  // emit a breakpoint instruction (int 3)
+                         << BuildInsnString(entry->fmt, lir, 0);
+  for (int i = 0; i < GetInsnSize(lir); ++i) {
+    code_buffer_.push_back(0xCC);  // emit a breakpoint instruction (int 3)
   }
 }
 
@@ -1172,12 +1159,12 @@
  * instruction.  In those cases we will try to substitute a new code
  * sequence or request that the trace be shortened and retried.
  */
-AssemblerStatus X86Codegen::AssembleInstructions(CompilationUnit *cu, uintptr_t start_addr) {
+AssemblerStatus X86Mir2Lir::AssembleInstructions(uintptr_t start_addr) {
   LIR *lir;
   AssemblerStatus res = kSuccess;  // Assume success
 
   const bool kVerbosePcFixup = false;
-  for (lir = cu->first_lir_insn; lir != NULL; lir = NEXT_LIR(lir)) {
+  for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
     if (lir->opcode < 0) {
       continue;
     }
@@ -1206,14 +1193,14 @@
                   << " delta: " << delta << " old delta: " << lir->operands[0];
             }
             lir->opcode = kX86Jcc32;
-            SetupResourceMasks(cu, lir);
+            SetupResourceMasks(lir);
             res = kRetryAll;
           }
           if (kVerbosePcFixup) {
             LOG(INFO) << "Source:";
-            DumpLIRInsn(cu, lir, 0);
+            DumpLIRInsn(lir, 0);
             LOG(INFO) << "Target:";
-            DumpLIRInsn(cu, target_lir, 0);
+            DumpLIRInsn(target_lir, 0);
             LOG(INFO) << "Delta " << delta;
           }
           lir->operands[0] = delta;
@@ -1227,9 +1214,9 @@
           int delta = target - pc;
           if (kVerbosePcFixup) {
             LOG(INFO) << "Source:";
-            DumpLIRInsn(cu, lir, 0);
+            DumpLIRInsn(lir, 0);
             LOG(INFO) << "Target:";
-            DumpLIRInsn(cu, target_lir, 0);
+            DumpLIRInsn(target_lir, 0);
             LOG(INFO) << "Delta " << delta;
           }
           lir->operands[0] = delta;
@@ -1247,7 +1234,7 @@
           }
           uintptr_t target = target_lir->offset;
           delta = target - pc;
-          if (!(cu->disable_opt & (1 << kSafeOptimizations)) && delta == 0) {
+          if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && delta == 0) {
             // Useless branch
             lir->flags.is_nop = true;
             if (kVerbosePcFixup) {
@@ -1259,7 +1246,7 @@
               LOG(INFO) << "Retry for JMP growth at " << lir->offset;
             }
             lir->opcode = kX86Jmp32;
-            SetupResourceMasks(cu, lir);
+            SetupResourceMasks(lir);
             res = kRetryAll;
           }
           lir->operands[0] = delta;
@@ -1287,21 +1274,21 @@
     if (res != kSuccess) {
       continue;
     }
-    CHECK_EQ(static_cast<size_t>(lir->offset), cu->code_buffer.size());
-    const X86EncodingMap *entry = &X86Codegen::EncodingMap[lir->opcode];
-    size_t starting_cbuf_size = cu->code_buffer.size();
+    CHECK_EQ(static_cast<size_t>(lir->offset), code_buffer_.size());
+    const X86EncodingMap *entry = &X86Mir2Lir::EncodingMap[lir->opcode];
+    size_t starting_cbuf_size = code_buffer_.size();
     switch (entry->kind) {
       case kData:  // 4 bytes of data
-        cu->code_buffer.push_back(lir->operands[0]);
+        code_buffer_.push_back(lir->operands[0]);
         break;
       case kNullary:  // 1 byte of opcode
         DCHECK_EQ(0, entry->skeleton.prefix1);
         DCHECK_EQ(0, entry->skeleton.prefix2);
-        cu->code_buffer.push_back(entry->skeleton.opcode);
+        code_buffer_.push_back(entry->skeleton.opcode);
         if (entry->skeleton.extra_opcode1 != 0) {
-          cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
+          code_buffer_.push_back(entry->skeleton.extra_opcode1);
           if (entry->skeleton.extra_opcode2 != 0) {
-            cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
+            code_buffer_.push_back(entry->skeleton.extra_opcode2);
           }
         } else {
           DCHECK_EQ(0, entry->skeleton.extra_opcode2);
@@ -1311,88 +1298,88 @@
         DCHECK_EQ(0, entry->skeleton.immediate_bytes);
         break;
       case kReg:  // lir operands - 0: reg
-        EmitOpReg(cu, entry, lir->operands[0]);
+        EmitOpReg(entry, lir->operands[0]);
         break;
       case kMem:  // lir operands - 0: base, 1: disp
-        EmitOpMem(cu, entry, lir->operands[0], lir->operands[1]);
+        EmitOpMem(entry, lir->operands[0], lir->operands[1]);
         break;
       case kMemReg:  // lir operands - 0: base, 1: disp, 2: reg
-        EmitMemReg(cu, entry, lir->operands[0], lir->operands[1], lir->operands[2]);
+        EmitMemReg(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
         break;
       case kArrayReg:  // lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
-        EmitArrayReg(cu, entry, lir->operands[0], lir->operands[1], lir->operands[2],
+        EmitArrayReg(entry, lir->operands[0], lir->operands[1], lir->operands[2],
                      lir->operands[3], lir->operands[4]);
         break;
       case kRegMem:  // lir operands - 0: reg, 1: base, 2: disp
-        EmitRegMem(cu, entry, lir->operands[0], lir->operands[1], lir->operands[2]);
+        EmitRegMem(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
         break;
       case kRegArray:  // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp
-        EmitRegArray(cu, entry, lir->operands[0], lir->operands[1], lir->operands[2],
+        EmitRegArray(entry, lir->operands[0], lir->operands[1], lir->operands[2],
                      lir->operands[3], lir->operands[4]);
         break;
       case kRegThread:  // lir operands - 0: reg, 1: disp
-        EmitRegThread(cu, entry, lir->operands[0], lir->operands[1]);
+        EmitRegThread(entry, lir->operands[0], lir->operands[1]);
         break;
       case kRegReg:  // lir operands - 0: reg1, 1: reg2
-        EmitRegReg(cu, entry, lir->operands[0], lir->operands[1]);
+        EmitRegReg(entry, lir->operands[0], lir->operands[1]);
         break;
       case kRegRegStore:  // lir operands - 0: reg2, 1: reg1
-        EmitRegReg(cu, entry, lir->operands[1], lir->operands[0]);
+        EmitRegReg(entry, lir->operands[1], lir->operands[0]);
         break;
       case kRegRegImm:
-        EmitRegRegImm(cu, entry, lir->operands[0], lir->operands[1], lir->operands[2]);
+        EmitRegRegImm(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
         break;
       case kRegImm:  // lir operands - 0: reg, 1: immediate
-        EmitRegImm(cu, entry, lir->operands[0], lir->operands[1]);
+        EmitRegImm(entry, lir->operands[0], lir->operands[1]);
         break;
       case kThreadImm:  // lir operands - 0: disp, 1: immediate
-        EmitThreadImm(cu, entry, lir->operands[0], lir->operands[1]);
+        EmitThreadImm(entry, lir->operands[0], lir->operands[1]);
         break;
       case kMovRegImm:  // lir operands - 0: reg, 1: immediate
-        EmitMovRegImm(cu, entry, lir->operands[0], lir->operands[1]);
+        EmitMovRegImm(entry, lir->operands[0], lir->operands[1]);
         break;
       case kShiftRegImm:  // lir operands - 0: reg, 1: immediate
-        EmitShiftRegImm(cu, entry, lir->operands[0], lir->operands[1]);
+        EmitShiftRegImm(entry, lir->operands[0], lir->operands[1]);
         break;
       case kShiftRegCl: // lir operands - 0: reg, 1: cl
-        EmitShiftRegCl(cu, entry, lir->operands[0], lir->operands[1]);
+        EmitShiftRegCl(entry, lir->operands[0], lir->operands[1]);
         break;
       case kRegCond:  // lir operands - 0: reg, 1: condition
-        EmitRegCond(cu, entry, lir->operands[0], lir->operands[1]);
+        EmitRegCond(entry, lir->operands[0], lir->operands[1]);
         break;
       case kJmp:  // lir operands - 0: rel
-        EmitJmp(cu, entry, lir->operands[0]);
+        EmitJmp(entry, lir->operands[0]);
         break;
       case kJcc:  // lir operands - 0: rel, 1: CC, target assigned
-        EmitJcc(cu, entry, lir->operands[0], lir->operands[1]);
+        EmitJcc(entry, lir->operands[0], lir->operands[1]);
         break;
       case kCall:
         switch (entry->opcode) {
           case kX86CallM:  // lir operands - 0: base, 1: disp
-            EmitCallMem(cu, entry, lir->operands[0], lir->operands[1]);
+            EmitCallMem(entry, lir->operands[0], lir->operands[1]);
             break;
           case kX86CallT:  // lir operands - 0: disp
-            EmitCallThread(cu, entry, lir->operands[0]);
+            EmitCallThread(entry, lir->operands[0]);
             break;
           default:
-            EmitUnimplemented(cu, entry, lir);
+            EmitUnimplemented(entry, lir);
             break;
         }
         break;
       case kPcRel:  // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: table
-        EmitPcRel(cu, entry, lir->operands[0], lir->operands[1], lir->operands[2],
+        EmitPcRel(entry, lir->operands[0], lir->operands[1], lir->operands[2],
                   lir->operands[3], lir->operands[4]);
         break;
       case kMacro:
-        EmitMacro(cu, entry, lir->operands[0], lir->offset);
+        EmitMacro(entry, lir->operands[0], lir->offset);
         break;
       default:
-        EmitUnimplemented(cu, entry, lir);
+        EmitUnimplemented(entry, lir);
         break;
     }
     CHECK_EQ(static_cast<size_t>(GetInsnSize(lir)),
-             cu->code_buffer.size() - starting_cbuf_size)
-        << "Instruction size mismatch for entry: " << X86Codegen::EncodingMap[lir->opcode].name;
+             code_buffer_.size() - starting_cbuf_size)
+        << "Instruction size mismatch for entry: " << X86Mir2Lir::EncodingMap[lir->opcode].name;
   }
   return res;
 }
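AssembleInstructions returns kRetryAll whenever widening a branch changed
instruction sizes; the caller (outside this hunk) must then reassign LIR
offsets and assemble again until a pass completes cleanly. A sketch of
that fixed-point loop, under the assumption that offset assignment is a
separate pass (AssignOffsets and Assemble are stand-in names):

    enum AssemblerStatus { kSuccess, kRetryAll };

    template <typename AssignFn, typename AssembleFn>
    static void AssembleUntilStable(AssignFn AssignOffsets, AssembleFn Assemble) {
      for (;;) {
        AssignOffsets();               // recompute lir->offset for every LIR
        if (Assemble() == kSuccess) {  // may widen Jcc8->Jcc32, Jmp8->Jmp32
          break;                       // sizes stable: code buffer is final
        }
        // kRetryAll: some branch grew, so every later offset moved; redo both.
      }
    }
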
diff --git a/src/compiler/dex/quick/x86/call_x86.cc b/src/compiler/dex/quick/x86/call_x86.cc
index f30e966..6b215f2 100644
--- a/src/compiler/dex/quick/x86/call_x86.cc
+++ b/src/compiler/dex/quick/x86/call_x86.cc
@@ -17,13 +17,11 @@
 /* This file contains codegen for the X86 ISA */
 
 #include "codegen_x86.h"
-#include "compiler/dex/quick/codegen_util.h"
-#include "compiler/dex/quick/ralloc_util.h"
 #include "x86_lir.h"
 
 namespace art {
 
-void X86Codegen::GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+void X86Mir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir,
                                 SpecialCaseHandler special_case)
 {
   // TODO
@@ -33,24 +31,23 @@
  * The sparse table in the literal pool is an array of <key,displacement>
  * pairs.
  */
-void X86Codegen::GenSparseSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
+void X86Mir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset,
                                  RegLocation rl_src)
 {
-  const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
-  if (cu->verbose) {
+  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+  if (cu_->verbose) {
     DumpSparseSwitchTable(table);
   }
   int entries = table[1];
   const int* keys = reinterpret_cast<const int*>(&table[2]);
   const int* targets = &keys[entries];
-  rl_src = LoadValue(cu, rl_src, kCoreReg);
+  rl_src = LoadValue(rl_src, kCoreReg);
   for (int i = 0; i < entries; i++) {
     int key = keys[i];
     BasicBlock* case_block =
-        cu->mir_graph.get()->FindBlock(cu->current_dalvik_offset + targets[i]);
-    LIR* label_list = cu->block_label_list;
-    OpCmpImmBranch(cu, kCondEq, rl_src.low_reg, key,
-                   &label_list[case_block->id]);
+        mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]);
+    OpCmpImmBranch(kCondEq, rl_src.low_reg, key,
+                   &block_label_list_[case_block->id]);
   }
 }
 
@@ -70,53 +67,53 @@
  * jmp  r_start_of_method
  * done:
  */
-void X86Codegen::GenPackedSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
+void X86Mir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset,
                                  RegLocation rl_src)
 {
-  const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
-  if (cu->verbose) {
+  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+  if (cu_->verbose) {
     DumpPackedSwitchTable(table);
   }
   // Add the table to the list - we'll process it later
   SwitchTable *tab_rec =
-      static_cast<SwitchTable *>(NewMem(cu, sizeof(SwitchTable), true, kAllocData));
+      static_cast<SwitchTable *>(NewMem(cu_, sizeof(SwitchTable), true, kAllocData));
   tab_rec->table = table;
-  tab_rec->vaddr = cu->current_dalvik_offset;
+  tab_rec->vaddr = current_dalvik_offset_;
   int size = table[1];
-  tab_rec->targets = static_cast<LIR**>(NewMem(cu, size * sizeof(LIR*), true, kAllocLIR));
-  InsertGrowableList(cu, &cu->switch_tables, reinterpret_cast<uintptr_t>(tab_rec));
+  tab_rec->targets = static_cast<LIR**>(NewMem(cu_, size * sizeof(LIR*), true, kAllocLIR));
+  InsertGrowableList(cu_, &switch_tables_, reinterpret_cast<uintptr_t>(tab_rec));
 
   // Get the switch value
-  rl_src = LoadValue(cu, rl_src, kCoreReg);
-  int start_of_method_reg = AllocTemp(cu);
+  rl_src = LoadValue(rl_src, kCoreReg);
+  int start_of_method_reg = AllocTemp();
   // Materialize a pointer to the switch table
-  //NewLIR0(cu, kX86Bkpt);
-  NewLIR1(cu, kX86StartOfMethod, start_of_method_reg);
+  //NewLIR0(kX86Bkpt);
+  NewLIR1(kX86StartOfMethod, start_of_method_reg);
   int low_key = s4FromSwitchData(&table[2]);
   int keyReg;
   // Remove the bias, if necessary
   if (low_key == 0) {
     keyReg = rl_src.low_reg;
   } else {
-    keyReg = AllocTemp(cu);
-    OpRegRegImm(cu, kOpSub, keyReg, rl_src.low_reg, low_key);
+    keyReg = AllocTemp();
+    OpRegRegImm(kOpSub, keyReg, rl_src.low_reg, low_key);
   }
   // Bounds check: if < 0 or >= size, fall through past the switch
-  OpRegImm(cu, kOpCmp, keyReg, size-1);
-  LIR* branch_over = OpCondBranch(cu, kCondHi, NULL);
+  OpRegImm(kOpCmp, keyReg, size-1);
+  LIR* branch_over = OpCondBranch(kCondHi, NULL);
 
   // Load the displacement from the switch table
-  int disp_reg = AllocTemp(cu);
-  NewLIR5(cu, kX86PcRelLoadRA, disp_reg, start_of_method_reg, keyReg, 2,
+  int disp_reg = AllocTemp();
+  NewLIR5(kX86PcRelLoadRA, disp_reg, start_of_method_reg, keyReg, 2,
           reinterpret_cast<uintptr_t>(tab_rec));
   // Add displacement to start of method
-  OpRegReg(cu, kOpAdd, start_of_method_reg, disp_reg);
+  OpRegReg(kOpAdd, start_of_method_reg, disp_reg);
   // ...and go!
-  LIR* switch_branch = NewLIR1(cu, kX86JmpR, start_of_method_reg);
+  LIR* switch_branch = NewLIR1(kX86JmpR, start_of_method_reg);
   tab_rec->anchor = switch_branch;
 
   /* branch_over target here */
-  LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+  LIR* target = NewLIR0(kPseudoTargetLabel);
   branch_over->target = target;
 }
 
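To make the sequence GenPackedSwitch emits concrete: after the bias is
removed, a single unsigned compare (kCondHi against size - 1) performs
both the < 0 and >= size checks, and the key then indexes a table of code
displacements that get added to the start-of-method address. The same
dispatch in plain C++ (names and table contents are illustrative):

    #include <cstdint>
    #include <cstddef>

    static uintptr_t SwitchTarget(int32_t key, int32_t low_key, size_t size,
                                  const int32_t* disp_table,
                                  uintptr_t method_start,
                                  uintptr_t fall_through) {
      uint32_t index = static_cast<uint32_t>(key - low_key);
      if (index >= size) {  // one unsigned compare covers both bounds
        return fall_through;
      }
      return method_start + disp_table[index];
    }
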
@@ -130,98 +127,98 @@
  *
  * Total size is 4+(width * size + 1)/2 16-bit code units.
  */
-void X86Codegen::GenFillArrayData(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
+void X86Mir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src)
 {
-  const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
+  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
   // Add the table to the list - we'll process it later
   FillArrayData *tab_rec =
-      static_cast<FillArrayData*>(NewMem(cu, sizeof(FillArrayData), true, kAllocData));
+      static_cast<FillArrayData*>(NewMem(cu_, sizeof(FillArrayData), true, kAllocData));
   tab_rec->table = table;
-  tab_rec->vaddr = cu->current_dalvik_offset;
+  tab_rec->vaddr = current_dalvik_offset_;
   uint16_t width = tab_rec->table[1];
   uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
   tab_rec->size = (size * width) + 8;
 
-  InsertGrowableList(cu, &cu->fill_array_data, reinterpret_cast<uintptr_t>(tab_rec));
+  InsertGrowableList(cu_, &fill_array_data_, reinterpret_cast<uintptr_t>(tab_rec));
 
   // Making a call - use explicit registers
-  FlushAllRegs(cu);   /* Everything to home location */
-  LoadValueDirectFixed(cu, rl_src, rX86_ARG0);
+  FlushAllRegs();   /* Everything to home location */
+  LoadValueDirectFixed(rl_src, rX86_ARG0);
   // Materialize a pointer to the fill data image
-  NewLIR1(cu, kX86StartOfMethod, rX86_ARG2);
-  NewLIR2(cu, kX86PcRelAdr, rX86_ARG1, reinterpret_cast<uintptr_t>(tab_rec));
-  NewLIR2(cu, kX86Add32RR, rX86_ARG1, rX86_ARG2);
-  CallRuntimeHelperRegReg(cu, ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), rX86_ARG0,
+  NewLIR1(kX86StartOfMethod, rX86_ARG2);
+  NewLIR2(kX86PcRelAdr, rX86_ARG1, reinterpret_cast<uintptr_t>(tab_rec));
+  NewLIR2(kX86Add32RR, rX86_ARG1, rX86_ARG2);
+  CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), rX86_ARG0,
                           rX86_ARG1, true);
 }
 
-void X86Codegen::GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
+void X86Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src)
 {
-  FlushAllRegs(cu);
-  LoadValueDirectFixed(cu, rl_src, rCX);  // Get obj
-  LockCallTemps(cu);  // Prepare for explicit register usage
-  GenNullCheck(cu, rl_src.s_reg_low, rCX, opt_flags);
+  FlushAllRegs();
+  LoadValueDirectFixed(rl_src, rCX);  // Get obj
+  LockCallTemps();  // Prepare for explicit register usage
+  GenNullCheck(rl_src.s_reg_low, rCX, opt_flags);
   // If lock is unheld, try to grab it quickly with compare and exchange
   // TODO: copy and clear hash state?
-  NewLIR2(cu, kX86Mov32RT, rDX, Thread::ThinLockIdOffset().Int32Value());
-  NewLIR2(cu, kX86Sal32RI, rDX, LW_LOCK_OWNER_SHIFT);
-  NewLIR2(cu, kX86Xor32RR, rAX, rAX);
-  NewLIR3(cu, kX86LockCmpxchgMR, rCX, mirror::Object::MonitorOffset().Int32Value(), rDX);
-  LIR* branch = NewLIR2(cu, kX86Jcc8, 0, kX86CondEq);
+  NewLIR2(kX86Mov32RT, rDX, Thread::ThinLockIdOffset().Int32Value());
+  NewLIR2(kX86Sal32RI, rDX, LW_LOCK_OWNER_SHIFT);
+  NewLIR2(kX86Xor32RR, rAX, rAX);
+  NewLIR3(kX86LockCmpxchgMR, rCX, mirror::Object::MonitorOffset().Int32Value(), rDX);
+  LIR* branch = NewLIR2(kX86Jcc8, 0, kX86CondEq);
   // If lock is held, go the expensive route - artLockObjectFromCode(self, obj);
-  CallRuntimeHelperReg(cu, ENTRYPOINT_OFFSET(pLockObjectFromCode), rCX, true);
-  branch->target = NewLIR0(cu, kPseudoTargetLabel);
+  CallRuntimeHelperReg(ENTRYPOINT_OFFSET(pLockObjectFromCode), rCX, true);
+  branch->target = NewLIR0(kPseudoTargetLabel);
 }
 
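The fast path GenMonitorEnter emits is a thin-lock acquire: rDX is built
as thread_id << LW_LOCK_OWNER_SHIFT, rAX is zeroed, and lock cmpxchg
installs rDX into the object's monitor word only if that word was 0
(unlocked, no hash). The same compare-and-swap expressed with std::atomic
(the shift value and lock-word layout here are illustrative, not ART's
exact format):

    #include <atomic>
    #include <cstdint>

    constexpr int kLockOwnerShift = 3;  // stand-in for LW_LOCK_OWNER_SHIFT

    // Returns true if the thin lock was grabbed; false means the word was
    // nonzero (held or hashed) and the slow path must run.
    static bool ThinLockEnter(std::atomic<uint32_t>& monitor_word,
                              uint32_t thread_id) {
      uint32_t expected = 0;  // the xor eax, eax above
      return monitor_word.compare_exchange_strong(
          expected, thread_id << kLockOwnerShift);
    }
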
-void X86Codegen::GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
+void X86Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src)
 {
-  FlushAllRegs(cu);
-  LoadValueDirectFixed(cu, rl_src, rAX);  // Get obj
-  LockCallTemps(cu);  // Prepare for explicit register usage
-  GenNullCheck(cu, rl_src.s_reg_low, rAX, opt_flags);
+  FlushAllRegs();
+  LoadValueDirectFixed(rl_src, rAX);  // Get obj
+  LockCallTemps();  // Prepare for explicit register usage
+  GenNullCheck(rl_src.s_reg_low, rAX, opt_flags);
   // If lock is held by the current thread, clear it to quickly release it
   // TODO: clear hash state?
-  NewLIR2(cu, kX86Mov32RT, rDX, Thread::ThinLockIdOffset().Int32Value());
-  NewLIR2(cu, kX86Sal32RI, rDX, LW_LOCK_OWNER_SHIFT);
-  NewLIR3(cu, kX86Mov32RM, rCX, rAX, mirror::Object::MonitorOffset().Int32Value());
-  OpRegReg(cu, kOpSub, rCX, rDX);
-  LIR* branch = NewLIR2(cu, kX86Jcc8, 0, kX86CondNe);
-  NewLIR3(cu, kX86Mov32MR, rAX, mirror::Object::MonitorOffset().Int32Value(), rCX);
-  LIR* branch2 = NewLIR1(cu, kX86Jmp8, 0);
-  branch->target = NewLIR0(cu, kPseudoTargetLabel);
+  NewLIR2(kX86Mov32RT, rDX, Thread::ThinLockIdOffset().Int32Value());
+  NewLIR2(kX86Sal32RI, rDX, LW_LOCK_OWNER_SHIFT);
+  NewLIR3(kX86Mov32RM, rCX, rAX, mirror::Object::MonitorOffset().Int32Value());
+  OpRegReg(kOpSub, rCX, rDX);
+  LIR* branch = NewLIR2(kX86Jcc8, 0, kX86CondNe);
+  NewLIR3(kX86Mov32MR, rAX, mirror::Object::MonitorOffset().Int32Value(), rCX);
+  LIR* branch2 = NewLIR1(kX86Jmp8, 0);
+  branch->target = NewLIR0(kPseudoTargetLabel);
   // Otherwise, go the expensive route - UnlockObjectFromCode(obj);
-  CallRuntimeHelperReg(cu, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rAX, true);
-  branch2->target = NewLIR0(cu, kPseudoTargetLabel);
+  CallRuntimeHelperReg(ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rAX, true);
+  branch2->target = NewLIR0(kPseudoTargetLabel);
 }
 
-void X86Codegen::GenMoveException(CompilationUnit* cu, RegLocation rl_dest)
+void X86Mir2Lir::GenMoveException(RegLocation rl_dest)
 {
   int ex_offset = Thread::ExceptionOffset().Int32Value();
-  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-  NewLIR2(cu, kX86Mov32RT, rl_result.low_reg, ex_offset);
-  NewLIR2(cu, kX86Mov32TI, ex_offset, 0);
-  StoreValue(cu, rl_dest, rl_result);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  NewLIR2(kX86Mov32RT, rl_result.low_reg, ex_offset);
+  NewLIR2(kX86Mov32TI, ex_offset, 0);
+  StoreValue(rl_dest, rl_result);
 }
 
 /*
  * Mark garbage collection card. Skip if the value we're storing is null.
  */
-void X86Codegen::MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg)
+void X86Mir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg)
 {
-  int reg_card_base = AllocTemp(cu);
-  int reg_card_no = AllocTemp(cu);
-  LIR* branch_over = OpCmpImmBranch(cu, kCondEq, val_reg, 0, NULL);
-  NewLIR2(cu, kX86Mov32RT, reg_card_base, Thread::CardTableOffset().Int32Value());
-  OpRegRegImm(cu, kOpLsr, reg_card_no, tgt_addr_reg, CardTable::kCardShift);
-  StoreBaseIndexed(cu, reg_card_base, reg_card_no, reg_card_base, 0,
+  int reg_card_base = AllocTemp();
+  int reg_card_no = AllocTemp();
+  LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
+  NewLIR2(kX86Mov32RT, reg_card_base, Thread::CardTableOffset().Int32Value());
+  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, CardTable::kCardShift);
+  StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0,
                    kUnsignedByte);
-  LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+  LIR* target = NewLIR0(kPseudoTargetLabel);
   branch_over->target = target;
-  FreeTemp(cu, reg_card_base);
-  FreeTemp(cu, reg_card_no);
+  FreeTemp(reg_card_base);
+  FreeTemp(reg_card_no);
 }
 
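MarkGCCard above is the usual card-table write barrier: skip when the
stored value is null, otherwise dirty the card covering tgt_addr_reg by
storing one byte at card_base + (addr >> kCardShift). Note the emitted
StoreBaseIndexed stores the card base register itself as that byte; ART
biases the table so the base's low byte works as the dirty value. A sketch
(shift and bias details are illustrative):

    #include <cstdint>

    static void MarkCard(uint8_t* card_base, uintptr_t addr, int card_shift) {
      // Store the table base's low byte as the dirty marker, matching
      // StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, ...) above.
      card_base[addr >> card_shift] =
          static_cast<uint8_t>(reinterpret_cast<uintptr_t>(card_base));
    }
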
-void X86Codegen::GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs, RegLocation rl_method)
+void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method)
 {
   /*
    * On entry, rX86_ARG0, rX86_ARG1, rX86_ARG2 are live.  Let the register
@@ -229,54 +226,54 @@
    * expanding the frame or flushing.  This leaves the utility
    * code with no spare temps.
    */
-  LockTemp(cu, rX86_ARG0);
-  LockTemp(cu, rX86_ARG1);
-  LockTemp(cu, rX86_ARG2);
+  LockTemp(rX86_ARG0);
+  LockTemp(rX86_ARG1);
+  LockTemp(rX86_ARG2);
 
   /* Build frame, return address already on stack */
-  OpRegImm(cu, kOpSub, rX86_SP, cu->frame_size - 4);
+  OpRegImm(kOpSub, rX86_SP, frame_size_ - 4);
 
   /*
    * We can safely skip the stack overflow check if we're
    * a leaf *and* our frame size < fudge factor.
    */
-  bool skip_overflow_check = ((cu->attributes & METHOD_IS_LEAF) &&
-                (static_cast<size_t>(cu->frame_size) <
+  bool skip_overflow_check = (mir_graph_->MethodIsLeaf() &&
+                (static_cast<size_t>(frame_size_) <
                 Thread::kStackOverflowReservedBytes));
-  NewLIR0(cu, kPseudoMethodEntry);
+  NewLIR0(kPseudoMethodEntry);
   /* Spill core callee saves */
-  SpillCoreRegs(cu);
+  SpillCoreRegs();
   /* NOTE: promotion of FP regs currently unsupported, thus no FP spill */
-  DCHECK_EQ(cu->num_fp_spills, 0);
+  DCHECK_EQ(num_fp_spills_, 0);
   if (!skip_overflow_check) {
     // cmp rX86_SP, fs:[stack_end_]; jcc throw_launchpad
-    LIR* tgt = RawLIR(cu, 0, kPseudoThrowTarget, kThrowStackOverflow, 0, 0, 0, 0);
-    OpRegThreadMem(cu, kOpCmp, rX86_SP, Thread::StackEndOffset().Int32Value());
-    OpCondBranch(cu, kCondUlt, tgt);
+    LIR* tgt = RawLIR(0, kPseudoThrowTarget, kThrowStackOverflow, 0, 0, 0, 0);
+    OpRegThreadMem(kOpCmp, rX86_SP, Thread::StackEndOffset().Int32Value());
+    OpCondBranch(kCondUlt, tgt);
     // Remember branch target - will process later
-    InsertGrowableList(cu, &cu->throw_launchpads, reinterpret_cast<uintptr_t>(tgt));
+    InsertGrowableList(cu_, &throw_launchpads_, reinterpret_cast<uintptr_t>(tgt));
   }
 
-  FlushIns(cu, ArgLocs, rl_method);
+  FlushIns(ArgLocs, rl_method);
 
-  FreeTemp(cu, rX86_ARG0);
-  FreeTemp(cu, rX86_ARG1);
-  FreeTemp(cu, rX86_ARG2);
+  FreeTemp(rX86_ARG0);
+  FreeTemp(rX86_ARG1);
+  FreeTemp(rX86_ARG2);
 }
 
-void X86Codegen::GenExitSequence(CompilationUnit* cu) {
+void X86Mir2Lir::GenExitSequence() {
   /*
    * In the exit path, rX86_RET0/rX86_RET1 are live - make sure they aren't
    * allocated by the register utilities as temps.
    */
-  LockTemp(cu, rX86_RET0);
-  LockTemp(cu, rX86_RET1);
+  LockTemp(rX86_RET0);
+  LockTemp(rX86_RET1);
 
-  NewLIR0(cu, kPseudoMethodExit);
-  UnSpillCoreRegs(cu);
+  NewLIR0(kPseudoMethodExit);
+  UnSpillCoreRegs();
   /* Remove frame except for return address */
-  OpRegImm(cu, kOpAdd, rX86_SP, cu->frame_size - 4);
-  NewLIR0(cu, kX86Ret);
+  OpRegImm(kOpAdd, rX86_SP, frame_size_ - 4);
+  NewLIR0(kX86Ret);
 }
 
 }  // namespace art
diff --git a/src/compiler/dex/quick/x86/codegen_x86.h b/src/compiler/dex/quick/x86/codegen_x86.h
index 35c976a..93b6839 100644
--- a/src/compiler/dex/quick/x86/codegen_x86.h
+++ b/src/compiler/dex/quick/x86/codegen_x86.h
@@ -22,64 +22,60 @@
 
 namespace art {
 
-class X86Codegen : public Codegen {
+class X86Mir2Lir : public Mir2Lir {
   public:
+
+    X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph);
+
     // Required for target - codegen helpers.
-    virtual bool SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
-                                    RegLocation rl_src, RegLocation rl_dest, int lit);
-    virtual int LoadHelper(CompilationUnit* cu, int offset);
-    virtual LIR* LoadBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_dest,
-                              OpSize size, int s_reg);
-    virtual LIR* LoadBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_dest_lo,
-                                  int r_dest_hi, int s_reg);
-    virtual LIR* LoadBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_dest, int scale,
-                                 OpSize size);
-    virtual LIR* LoadBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
-                                     int displacement, int r_dest, int r_dest_hi, OpSize size,
-                                     int s_reg);
-    virtual LIR* LoadConstantNoClobber(CompilationUnit* cu, int r_dest, int value);
-    virtual LIR* LoadConstantWide(CompilationUnit* cu, int r_dest_lo, int r_dest_hi, int64_t value);
-    virtual LIR* StoreBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_src,
-                               OpSize size);
-    virtual LIR* StoreBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_src_lo,
-                                   int r_src_hi);
-    virtual LIR* StoreBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_src, int scale,
-                                 OpSize size);
-    virtual LIR* StoreBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
-                                      int displacement, int r_src, int r_src_hi, OpSize size,
-                                      int s_reg);
-    virtual void MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg);
+    virtual bool SmallLiteralDivide(Instruction::Code dalvik_opcode, RegLocation rl_src,
+                                    RegLocation rl_dest, int lit);
+    virtual int LoadHelper(int offset);
+    virtual LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg);
+    virtual LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
+                                  int s_reg);
+    virtual LIR* LoadBaseIndexed(int rBase, int r_index, int r_dest, int scale, OpSize size);
+    virtual LIR* LoadBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
+                                     int r_dest, int r_dest_hi, OpSize size, int s_reg);
+    virtual LIR* LoadConstantNoClobber(int r_dest, int value);
+    virtual LIR* LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value);
+    virtual LIR* StoreBaseDisp(int rBase, int displacement, int r_src, OpSize size);
+    virtual LIR* StoreBaseDispWide(int rBase, int displacement, int r_src_lo, int r_src_hi);
+    virtual LIR* StoreBaseIndexed(int rBase, int r_index, int r_src, int scale, OpSize size);
+    virtual LIR* StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
+                                      int r_src, int r_src_hi, OpSize size, int s_reg);
+    virtual void MarkGCCard(int val_reg, int tgt_addr_reg);
 
     // Required for target - register utilities.
     virtual bool IsFpReg(int reg);
     virtual bool SameRegType(int reg1, int reg2);
-    virtual int AllocTypedTemp(CompilationUnit* cu, bool fp_hint, int reg_class);
-    virtual int AllocTypedTempPair(CompilationUnit* cu, bool fp_hint, int reg_class);
+    virtual int AllocTypedTemp(bool fp_hint, int reg_class);
+    virtual int AllocTypedTempPair(bool fp_hint, int reg_class);
     virtual int S2d(int low_reg, int high_reg);
     virtual int TargetReg(SpecialTargetRegister reg);
-    virtual RegisterInfo* GetRegInfo(CompilationUnit* cu, int reg);
-    virtual RegLocation GetReturnAlt(CompilationUnit* cu);
-    virtual RegLocation GetReturnWideAlt(CompilationUnit* cu);
+    virtual RegisterInfo* GetRegInfo(int reg);
+    virtual RegLocation GetReturnAlt();
+    virtual RegLocation GetReturnWideAlt();
     virtual RegLocation LocCReturn();
     virtual RegLocation LocCReturnDouble();
     virtual RegLocation LocCReturnFloat();
     virtual RegLocation LocCReturnWide();
     virtual uint32_t FpRegMask();
-    virtual uint64_t GetRegMaskCommon(CompilationUnit* cu, int reg);
-    virtual void AdjustSpillMask(CompilationUnit* cu);
-    virtual void ClobberCalleeSave(CompilationUnit *cu);
-    virtual void FlushReg(CompilationUnit* cu, int reg);
-    virtual void FlushRegWide(CompilationUnit* cu, int reg1, int reg2);
-    virtual void FreeCallTemps(CompilationUnit* cu);
-    virtual void FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep, RegLocation rl_free);
-    virtual void LockCallTemps(CompilationUnit* cu);
-    virtual void MarkPreservedSingle(CompilationUnit* cu, int v_reg, int reg);
-    virtual void CompilerInitializeRegAlloc(CompilationUnit* cu);
+    virtual uint64_t GetRegMaskCommon(int reg);
+    virtual void AdjustSpillMask();
+    virtual void ClobberCalleeSave();
+    virtual void FlushReg(int reg);
+    virtual void FlushRegWide(int reg1, int reg2);
+    virtual void FreeCallTemps();
+    virtual void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free);
+    virtual void LockCallTemps();
+    virtual void MarkPreservedSingle(int v_reg, int reg);
+    virtual void CompilerInitializeRegAlloc();
 
     // Required for target - miscellaneous.
-    virtual AssemblerStatus AssembleInstructions(CompilationUnit* cu, uintptr_t start_addr);
+    virtual AssemblerStatus AssembleInstructions(uintptr_t start_addr);
     virtual void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
-    virtual void SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir);
+    virtual void SetupTargetResourceMasks(LIR* lir);
     virtual const char* GetTargetInstFmt(int opcode);
     virtual const char* GetTargetInstName(int opcode);
     virtual std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
@@ -89,115 +85,119 @@
     virtual bool IsUnconditionalBranch(LIR* lir);
 
     // Required for target - Dalvik-level generators.
-    virtual void GenArithImmOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+    virtual void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                    RegLocation rl_src1, RegLocation rl_src2);
-    virtual void GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array,
+    virtual void GenArrayObjPut(int opt_flags, RegLocation rl_array,
                                 RegLocation rl_index, RegLocation rl_src, int scale);
-    virtual void GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+    virtual void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                              RegLocation rl_index, RegLocation rl_dest, int scale);
-    virtual void GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+    virtual void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                              RegLocation rl_index, RegLocation rl_src, int scale);
-    virtual void GenShiftImmOpLong(CompilationUnit* cu, Instruction::Code opcode,
-                                   RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_shift);
-    virtual void GenMulLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2);
-    virtual void GenAddLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2);
-    virtual void GenAndLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2);
-    virtual void GenArithOpDouble(CompilationUnit* cu, Instruction::Code opcode,
-                                  RegLocation rl_dest, RegLocation rl_src1,
-                                  RegLocation rl_src2);
-    virtual void GenArithOpFloat(CompilationUnit *cu, Instruction::Code opcode, RegLocation rl_dest,
+    virtual void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                                   RegLocation rl_src1, RegLocation rl_shift);
+    virtual void GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    virtual void GenAddLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    virtual void GenAndLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    virtual void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest,
+                                  RegLocation rl_src1, RegLocation rl_src2);
+    virtual void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
                                  RegLocation rl_src1, RegLocation rl_src2);
-    virtual void GenCmpFP(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
-                          RegLocation rl_src1, RegLocation rl_src2);
-    virtual void GenConversion(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
-                               RegLocation rl_src);
-    virtual bool GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier);
-    virtual bool GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min);
-    virtual bool GenInlinedSqrt(CompilationUnit* cu, CallInfo* info);
-    virtual void GenNegLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
-    virtual void GenOrLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
-                           RegLocation rl_src2);
-    virtual void GenSubLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2);
-    virtual void GenXorLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2);
-    virtual LIR* GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code, int reg1, int base,
-                                int offset, ThrowKind kind);
-    virtual RegLocation GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int reg_hi,
-                                  bool is_div);
-    virtual RegLocation GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int lit,
-                                     bool is_div);
-    virtual void GenCmpLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2);
-    virtual void GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi);
-    virtual void GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs,
-                                  RegLocation rl_method);
-    virtual void GenExitSequence(CompilationUnit* cu);
-    virtual void GenFillArrayData(CompilationUnit* cu, uint32_t table_offset,
-                                  RegLocation rl_src);
-    virtual void GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir, bool gt_bias,
-                                     bool is_double);
-    virtual void GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir);
-    virtual void GenSelect(CompilationUnit* cu, BasicBlock* bb, MIR* mir);
-    virtual void GenMemBarrier(CompilationUnit* cu, MemBarrierKind barrier_kind);
-    virtual void GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src);
-    virtual void GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src);
-    virtual void GenMoveException(CompilationUnit* cu, RegLocation rl_dest);
-    virtual void GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
-                                               RegLocation rl_result, int lit, int first_bit,
-                                               int second_bit);
-    virtual void GenNegDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
-    virtual void GenNegFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
-    virtual void GenPackedSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
-                                 RegLocation rl_src);
-    virtual void GenSparseSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
-                                 RegLocation rl_src);
-    virtual void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
-                                SpecialCaseHandler special_case);
+    virtual void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                          RegLocation rl_src2);
+    virtual void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
+    virtual bool GenInlinedCas32(CallInfo* info, bool need_write_barrier);
+    virtual bool GenInlinedMinMaxInt(CallInfo* info, bool is_min);
+    virtual bool GenInlinedSqrt(CallInfo* info);
+    virtual void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
+    virtual void GenOrLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    virtual void GenSubLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    virtual void GenXorLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    virtual LIR* GenRegMemCheck(ConditionCode c_code, int reg1, int base, int offset,
+                                ThrowKind kind);
+    virtual RegLocation GenDivRem(RegLocation rl_dest, int reg_lo, int reg_hi, bool is_div);
+    virtual RegLocation GenDivRemLit(RegLocation rl_dest, int reg_lo, int lit, bool is_div);
+    virtual void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    virtual void GenDivZeroCheck(int reg_lo, int reg_hi);
+    virtual void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
+    virtual void GenExitSequence();
+    virtual void GenFillArrayData(uint32_t table_offset, RegLocation rl_src);
+    virtual void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
+    virtual void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
+    virtual void GenSelect(BasicBlock* bb, MIR* mir);
+    virtual void GenMemBarrier(MemBarrierKind barrier_kind);
+    virtual void GenMonitorEnter(int opt_flags, RegLocation rl_src);
+    virtual void GenMonitorExit(int opt_flags, RegLocation rl_src);
+    virtual void GenMoveException(RegLocation rl_dest);
+    virtual void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result,
+                                               int lit, int first_bit, int second_bit);
+    virtual void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
+    virtual void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
+    virtual void GenPackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+    virtual void GenSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+    virtual void GenSpecialCase(BasicBlock* bb, MIR* mir, SpecialCaseHandler special_case);
 
     // Single operation generators.
-    virtual LIR* OpUnconditionalBranch(CompilationUnit* cu, LIR* target);
-    virtual LIR* OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1, int src2,
-                             LIR* target);
-    virtual LIR* OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg, int check_value,
-                                LIR* target);
-    virtual LIR* OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target);
-    virtual LIR* OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg,
-                                LIR* target);
-    virtual LIR* OpFpRegCopy(CompilationUnit* cu, int r_dest, int r_src);
-    virtual LIR* OpIT(CompilationUnit* cu, ConditionCode cond, const char* guide);
-    virtual LIR* OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp);
-    virtual LIR* OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target);
-    virtual LIR* OpReg(CompilationUnit* cu, OpKind op, int r_dest_src);
-    virtual LIR* OpRegCopy(CompilationUnit* cu, int r_dest, int r_src);
-    virtual LIR* OpRegCopyNoInsert(CompilationUnit* cu, int r_dest, int r_src);
-    virtual LIR* OpRegImm(CompilationUnit* cu, OpKind op, int r_dest_src1, int value);
-    virtual LIR* OpRegMem(CompilationUnit* cu, OpKind op, int r_dest, int rBase, int offset);
-    virtual LIR* OpRegReg(CompilationUnit* cu, OpKind op, int r_dest_src1, int r_src2);
-    virtual LIR* OpRegRegImm(CompilationUnit* cu, OpKind op, int r_dest, int r_src1, int value);
-    virtual LIR* OpRegRegReg(CompilationUnit* cu, OpKind op, int r_dest, int r_src1,
-                             int r_src2);
-    virtual LIR* OpTestSuspend(CompilationUnit* cu, LIR* target);
-    virtual LIR* OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset);
-    virtual LIR* OpVldm(CompilationUnit* cu, int rBase, int count);
-    virtual LIR* OpVstm(CompilationUnit* cu, int rBase, int count);
-    virtual void OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale,
-                       int offset);
-    virtual void OpRegCopyWide(CompilationUnit* cu, int dest_lo, int dest_hi, int src_lo,
-                               int src_hi);
-    virtual void OpTlsCmp(CompilationUnit* cu, int offset, int val);
+    virtual LIR* OpUnconditionalBranch(LIR* target);
+    virtual LIR* OpCmpBranch(ConditionCode cond, int src1, int src2, LIR* target);
+    virtual LIR* OpCmpImmBranch(ConditionCode cond, int reg, int check_value, LIR* target);
+    virtual LIR* OpCondBranch(ConditionCode cc, LIR* target);
+    virtual LIR* OpDecAndBranch(ConditionCode c_code, int reg, LIR* target);
+    virtual LIR* OpFpRegCopy(int r_dest, int r_src);
+    virtual LIR* OpIT(ConditionCode cond, const char* guide);
+    virtual LIR* OpMem(OpKind op, int rBase, int disp);
+    virtual LIR* OpPcRelLoad(int reg, LIR* target);
+    virtual LIR* OpReg(OpKind op, int r_dest_src);
+    virtual LIR* OpRegCopy(int r_dest, int r_src);
+    virtual LIR* OpRegCopyNoInsert(int r_dest, int r_src);
+    virtual LIR* OpRegImm(OpKind op, int r_dest_src1, int value);
+    virtual LIR* OpRegMem(OpKind op, int r_dest, int rBase, int offset);
+    virtual LIR* OpRegReg(OpKind op, int r_dest_src1, int r_src2);
+    virtual LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value);
+    virtual LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2);
+    virtual LIR* OpTestSuspend(LIR* target);
+    virtual LIR* OpThreadMem(OpKind op, int thread_offset);
+    virtual LIR* OpVldm(int rBase, int count);
+    virtual LIR* OpVstm(int rBase, int count);
+    virtual void OpLea(int rBase, int reg1, int reg2, int scale, int offset);
+    virtual void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi);
+    virtual void OpTlsCmp(int offset, int val);
 
-    void OpRegThreadMem(CompilationUnit* cu, OpKind op, int r_dest, int thread_offset);
-    void SpillCoreRegs(CompilationUnit* cu);
-    void UnSpillCoreRegs(CompilationUnit* cu);
+    void OpRegThreadMem(OpKind op, int r_dest, int thread_offset);
+    void SpillCoreRegs();
+    void UnSpillCoreRegs();
     static const X86EncodingMap EncodingMap[kX86Last];
     bool InexpensiveConstantInt(int32_t value);
     bool InexpensiveConstantFloat(int32_t value);
     bool InexpensiveConstantLong(int64_t value);
     bool InexpensiveConstantDouble(int64_t value);
+
+  private:
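+    // Assembly-phase helpers: each encodes one LIR operand pattern from the
+    // EncodingMap into x86 machine code.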
+    void EmitDisp(int base, int disp);
+    void EmitOpReg(const X86EncodingMap* entry, uint8_t reg);
+    void EmitOpMem(const X86EncodingMap* entry, uint8_t base, int disp);
+    void EmitMemReg(const X86EncodingMap* entry, uint8_t base, int disp, uint8_t reg);
+    void EmitRegMem(const X86EncodingMap* entry, uint8_t reg, uint8_t base, int disp);
+    void EmitRegArray(const X86EncodingMap* entry, uint8_t reg, uint8_t base, uint8_t index,
+                      int scale, int disp);
+    void EmitArrayReg(const X86EncodingMap* entry, uint8_t base, uint8_t index, int scale, int disp,
+                      uint8_t reg);
+    void EmitRegThread(const X86EncodingMap* entry, uint8_t reg, int disp);
+    void EmitRegReg(const X86EncodingMap* entry, uint8_t reg1, uint8_t reg2);
+    void EmitRegRegImm(const X86EncodingMap* entry, uint8_t reg1, uint8_t reg2, int32_t imm);
+    void EmitRegImm(const X86EncodingMap* entry, uint8_t reg, int imm);
+    void EmitThreadImm(const X86EncodingMap* entry, int disp, int imm);
+    void EmitMovRegImm(const X86EncodingMap* entry, uint8_t reg, int imm);
+    void EmitShiftRegImm(const X86EncodingMap* entry, uint8_t reg, int imm);
+    void EmitShiftRegCl(const X86EncodingMap* entry, uint8_t reg, uint8_t cl);
+    void EmitRegCond(const X86EncodingMap* entry, uint8_t reg, uint8_t condition);
+    void EmitJmp(const X86EncodingMap* entry, int rel);
+    void EmitJcc(const X86EncodingMap* entry, int rel, uint8_t cc);
+    void EmitCallMem(const X86EncodingMap* entry, uint8_t base, int disp);
+    void EmitCallThread(const X86EncodingMap* entry, int disp);
+    void EmitPcRel(const X86EncodingMap* entry, uint8_t reg, int base_or_table, uint8_t index,
+                   int scale, int table_or_disp);
+    void EmitMacro(const X86EncodingMap* entry, uint8_t reg, int offset);
+    void EmitUnimplemented(const X86EncodingMap* entry, LIR* lir);
 };
 
 }  // namespace art
diff --git a/src/compiler/dex/quick/x86/fp_x86.cc b/src/compiler/dex/quick/x86/fp_x86.cc
index 888a4a6..db2cf28 100644
--- a/src/compiler/dex/quick/x86/fp_x86.cc
+++ b/src/compiler/dex/quick/x86/fp_x86.cc
@@ -15,13 +15,11 @@
  */
 
 #include "codegen_x86.h"
-#include "compiler/dex/quick/codegen_util.h"
-#include "compiler/dex/quick/ralloc_util.h"
 #include "x86_lir.h"
 
 namespace art {
 
-void X86Codegen::GenArithOpFloat(CompilationUnit *cu, Instruction::Code opcode,
+void X86Mir2Lir::GenArithOpFloat(Instruction::Code opcode,
                                  RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
   X86OpCode op = kX86Nop;
   RegLocation rl_result;
@@ -49,33 +47,33 @@
       break;
     case Instruction::REM_FLOAT_2ADDR:
     case Instruction::REM_FLOAT:
-      FlushAllRegs(cu);   // Send everything to home location
-      CallRuntimeHelperRegLocationRegLocation(cu, ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, false);
-      rl_result = GetReturn(cu, true);
-      StoreValue(cu, rl_dest, rl_result);
+      FlushAllRegs();   // Send everything to home location
+      CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, false);
+      rl_result = GetReturn(true);
+      StoreValue(rl_dest, rl_result);
       return;
     case Instruction::NEG_FLOAT:
-      GenNegFloat(cu, rl_dest, rl_src1);
+      GenNegFloat(rl_dest, rl_src1);
       return;
     default:
       LOG(FATAL) << "Unexpected opcode: " << opcode;
   }
-  rl_src1 = LoadValue(cu, rl_src1, kFPReg);
-  rl_src2 = LoadValue(cu, rl_src2, kFPReg);
-  rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+  rl_src1 = LoadValue(rl_src1, kFPReg);
+  rl_src2 = LoadValue(rl_src2, kFPReg);
+  rl_result = EvalLoc(rl_dest, kFPReg, true);
   int r_dest = rl_result.low_reg;
   int r_src1 = rl_src1.low_reg;
   int r_src2 = rl_src2.low_reg;
   if (r_dest == r_src2) {
-    r_src2 = AllocTempFloat(cu);
-    OpRegCopy(cu, r_src2, r_dest);
+    r_src2 = AllocTempFloat();
+    OpRegCopy(r_src2, r_dest);
   }
-  OpRegCopy(cu, r_dest, r_src1);
-  NewLIR2(cu, op, r_dest, r_src2);
-  StoreValue(cu, rl_dest, rl_result);
+  OpRegCopy(r_dest, r_src1);
+  NewLIR2(op, r_dest, r_src2);
+  StoreValue(rl_dest, rl_result);
 }
 
-void X86Codegen::GenArithOpDouble(CompilationUnit *cu, Instruction::Code opcode,
+void X86Mir2Lir::GenArithOpDouble(Instruction::Code opcode,
                                   RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
   X86OpCode op = kX86Nop;
   RegLocation rl_result;
@@ -99,37 +97,37 @@
       break;
     case Instruction::REM_DOUBLE_2ADDR:
     case Instruction::REM_DOUBLE:
-      FlushAllRegs(cu);   // Send everything to home location
-      CallRuntimeHelperRegLocationRegLocation(cu, ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, false);
-      rl_result = GetReturnWide(cu, true);
-      StoreValueWide(cu, rl_dest, rl_result);
+      FlushAllRegs();   // Send everything to home location
+      CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, false);
+      rl_result = GetReturnWide(true);
+      StoreValueWide(rl_dest, rl_result);
       return;
     case Instruction::NEG_DOUBLE:
-      GenNegDouble(cu, rl_dest, rl_src1);
+      GenNegDouble(rl_dest, rl_src1);
       return;
     default:
       LOG(FATAL) << "Unexpected opcode: " << opcode;
   }
-  rl_src1 = LoadValueWide(cu, rl_src1, kFPReg);
+  rl_src1 = LoadValueWide(rl_src1, kFPReg);
   DCHECK(rl_src1.wide);
-  rl_src2 = LoadValueWide(cu, rl_src2, kFPReg);
+  rl_src2 = LoadValueWide(rl_src2, kFPReg);
   DCHECK(rl_src2.wide);
-  rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+  rl_result = EvalLoc(rl_dest, kFPReg, true);
   DCHECK(rl_dest.wide);
   DCHECK(rl_result.wide);
   int r_dest = S2d(rl_result.low_reg, rl_result.high_reg);
   int r_src1 = S2d(rl_src1.low_reg, rl_src1.high_reg);
   int r_src2 = S2d(rl_src2.low_reg, rl_src2.high_reg);
   if (r_dest == r_src2) {
-    r_src2 = AllocTempDouble(cu) | X86_FP_DOUBLE;
-    OpRegCopy(cu, r_src2, r_dest);
+    r_src2 = AllocTempDouble() | X86_FP_DOUBLE;
+    OpRegCopy(r_src2, r_dest);
   }
-  OpRegCopy(cu, r_dest, r_src1);
-  NewLIR2(cu, op, r_dest, r_src2);
-  StoreValueWide(cu, rl_dest, rl_result);
+  OpRegCopy(r_dest, r_src1);
+  NewLIR2(op, r_dest, r_src2);
+  StoreValueWide(rl_dest, rl_result);
 }
 
-void X86Codegen::GenConversion(CompilationUnit *cu, Instruction::Code opcode, RegLocation rl_dest,
+void X86Mir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
                                RegLocation rl_src) {
   RegisterClass rcSrc = kFPReg;
   X86OpCode op = kX86Nop;
@@ -153,194 +151,193 @@
       op = kX86Cvtsi2sdRR;
       break;
     case Instruction::FLOAT_TO_INT: {
-      rl_src = LoadValue(cu, rl_src, kFPReg);
+      rl_src = LoadValue(rl_src, kFPReg);
       src_reg = rl_src.low_reg;
       // In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
-      ClobberSReg(cu, rl_dest.s_reg_low);
-      rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-      int temp_reg = AllocTempFloat(cu);
+      ClobberSReg(rl_dest.s_reg_low);
+      rl_result = EvalLoc(rl_dest, kCoreReg, true);
+      int temp_reg = AllocTempFloat();
 
-      LoadConstant(cu, rl_result.low_reg, 0x7fffffff);
-      NewLIR2(cu, kX86Cvtsi2ssRR, temp_reg, rl_result.low_reg);
-      NewLIR2(cu, kX86ComissRR, src_reg, temp_reg);
-      LIR* branch_pos_overflow = NewLIR2(cu, kX86Jcc8, 0, kX86CondA);
-      LIR* branch_na_n = NewLIR2(cu, kX86Jcc8, 0, kX86CondP);
-      NewLIR2(cu, kX86Cvttss2siRR, rl_result.low_reg, src_reg);
-      LIR* branch_normal = NewLIR1(cu, kX86Jmp8, 0);
-      branch_na_n->target = NewLIR0(cu, kPseudoTargetLabel);
-      NewLIR2(cu, kX86Xor32RR, rl_result.low_reg, rl_result.low_reg);
-      branch_pos_overflow->target = NewLIR0(cu, kPseudoTargetLabel);
-      branch_normal->target = NewLIR0(cu, kPseudoTargetLabel);
-      StoreValue(cu, rl_dest, rl_result);
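+      // Clamp per Java semantics: preload INT_MAX, compare src against
+      // (float)INT_MAX; "above" keeps INT_MAX, unordered (NaN) zeroes the
+      // result, and everything else takes the truncating cvttss2si.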
+      LoadConstant(rl_result.low_reg, 0x7fffffff);
+      NewLIR2(kX86Cvtsi2ssRR, temp_reg, rl_result.low_reg);
+      NewLIR2(kX86ComissRR, src_reg, temp_reg);
+      LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondA);
+      LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
+      NewLIR2(kX86Cvttss2siRR, rl_result.low_reg, src_reg);
+      LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
+      branch_na_n->target = NewLIR0(kPseudoTargetLabel);
+      NewLIR2(kX86Xor32RR, rl_result.low_reg, rl_result.low_reg);
+      branch_pos_overflow->target = NewLIR0(kPseudoTargetLabel);
+      branch_normal->target = NewLIR0(kPseudoTargetLabel);
+      StoreValue(rl_dest, rl_result);
       return;
     }
     case Instruction::DOUBLE_TO_INT: {
-      rl_src = LoadValueWide(cu, rl_src, kFPReg);
+      rl_src = LoadValueWide(rl_src, kFPReg);
       src_reg = rl_src.low_reg;
       // In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
-      ClobberSReg(cu, rl_dest.s_reg_low);
-      rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-      int temp_reg = AllocTempDouble(cu) | X86_FP_DOUBLE;
+      ClobberSReg(rl_dest.s_reg_low);
+      rl_result = EvalLoc(rl_dest, kCoreReg, true);
+      int temp_reg = AllocTempDouble() | X86_FP_DOUBLE;
 
-      LoadConstant(cu, rl_result.low_reg, 0x7fffffff);
-      NewLIR2(cu, kX86Cvtsi2sdRR, temp_reg, rl_result.low_reg);
-      NewLIR2(cu, kX86ComisdRR, src_reg, temp_reg);
-      LIR* branch_pos_overflow = NewLIR2(cu, kX86Jcc8, 0, kX86CondA);
-      LIR* branch_na_n = NewLIR2(cu, kX86Jcc8, 0, kX86CondP);
-      NewLIR2(cu, kX86Cvttsd2siRR, rl_result.low_reg, src_reg);
-      LIR* branch_normal = NewLIR1(cu, kX86Jmp8, 0);
-      branch_na_n->target = NewLIR0(cu, kPseudoTargetLabel);
-      NewLIR2(cu, kX86Xor32RR, rl_result.low_reg, rl_result.low_reg);
-      branch_pos_overflow->target = NewLIR0(cu, kPseudoTargetLabel);
-      branch_normal->target = NewLIR0(cu, kPseudoTargetLabel);
-      StoreValue(cu, rl_dest, rl_result);
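+      // Same NaN/overflow clamp as FLOAT_TO_INT, using the SSE2 double forms.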
+      LoadConstant(rl_result.low_reg, 0x7fffffff);
+      NewLIR2(kX86Cvtsi2sdRR, temp_reg, rl_result.low_reg);
+      NewLIR2(kX86ComisdRR, src_reg, temp_reg);
+      LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondA);
+      LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
+      NewLIR2(kX86Cvttsd2siRR, rl_result.low_reg, src_reg);
+      LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
+      branch_na_n->target = NewLIR0(kPseudoTargetLabel);
+      NewLIR2(kX86Xor32RR, rl_result.low_reg, rl_result.low_reg);
+      branch_pos_overflow->target = NewLIR0(kPseudoTargetLabel);
+      branch_normal->target = NewLIR0(kPseudoTargetLabel);
+      StoreValue(rl_dest, rl_result);
       return;
     }
     case Instruction::LONG_TO_DOUBLE:
-      GenConversionCall(cu, ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src);
+      GenConversionCall(ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src);
       return;
     case Instruction::LONG_TO_FLOAT:
       // TODO: inline by using memory as a 64-bit source. Be careful about promoted registers.
-      GenConversionCall(cu, ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src);
+      GenConversionCall(ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src);
       return;
     case Instruction::FLOAT_TO_LONG:
-      GenConversionCall(cu, ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
+      GenConversionCall(ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
       return;
     case Instruction::DOUBLE_TO_LONG:
-      GenConversionCall(cu, ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
+      GenConversionCall(ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
       return;
     default:
       LOG(INFO) << "Unexpected opcode: " << opcode;
   }
   if (rl_src.wide) {
-    rl_src = LoadValueWide(cu, rl_src, rcSrc);
+    rl_src = LoadValueWide(rl_src, rcSrc);
     src_reg = S2d(rl_src.low_reg, rl_src.high_reg);
   } else {
-    rl_src = LoadValue(cu, rl_src, rcSrc);
+    rl_src = LoadValue(rl_src, rcSrc);
     src_reg = rl_src.low_reg;
   }
   if (rl_dest.wide) {
-    rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
-    NewLIR2(cu, op, S2d(rl_result.low_reg, rl_result.high_reg), src_reg);
-    StoreValueWide(cu, rl_dest, rl_result);
+    rl_result = EvalLoc(rl_dest, kFPReg, true);
+    NewLIR2(op, S2d(rl_result.low_reg, rl_result.high_reg), src_reg);
+    StoreValueWide(rl_dest, rl_result);
   } else {
-    rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
-    NewLIR2(cu, op, rl_result.low_reg, src_reg);
-    StoreValue(cu, rl_dest, rl_result);
+    rl_result = EvalLoc(rl_dest, kFPReg, true);
+    NewLIR2(op, rl_result.low_reg, src_reg);
+    StoreValue(rl_dest, rl_result);
   }
 }
 
-void X86Codegen::GenCmpFP(CompilationUnit *cu, Instruction::Code code, RegLocation rl_dest,
+void X86Mir2Lir::GenCmpFP(Instruction::Code code, RegLocation rl_dest,
                           RegLocation rl_src1, RegLocation rl_src2) {
   bool single = (code == Instruction::CMPL_FLOAT) || (code == Instruction::CMPG_FLOAT);
   bool unordered_gt = (code == Instruction::CMPG_DOUBLE) || (code == Instruction::CMPG_FLOAT);
   int src_reg1;
   int src_reg2;
   if (single) {
-    rl_src1 = LoadValue(cu, rl_src1, kFPReg);
+    rl_src1 = LoadValue(rl_src1, kFPReg);
     src_reg1 = rl_src1.low_reg;
-    rl_src2 = LoadValue(cu, rl_src2, kFPReg);
+    rl_src2 = LoadValue(rl_src2, kFPReg);
     src_reg2 = rl_src2.low_reg;
   } else {
-    rl_src1 = LoadValueWide(cu, rl_src1, kFPReg);
+    rl_src1 = LoadValueWide(rl_src1, kFPReg);
     src_reg1 = S2d(rl_src1.low_reg, rl_src1.high_reg);
-    rl_src2 = LoadValueWide(cu, rl_src2, kFPReg);
+    rl_src2 = LoadValueWide(rl_src2, kFPReg);
     src_reg2 = S2d(rl_src2.low_reg, rl_src2.high_reg);
   }
   // In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
-  ClobberSReg(cu, rl_dest.s_reg_low);
-  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-  LoadConstantNoClobber(cu, rl_result.low_reg, unordered_gt ? 1 : 0);
+  ClobberSReg(rl_dest.s_reg_low);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  LoadConstantNoClobber(rl_result.low_reg, unordered_gt ? 1 : 0);
   if (single) {
-    NewLIR2(cu, kX86UcomissRR, src_reg1, src_reg2);
+    NewLIR2(kX86UcomissRR, src_reg1, src_reg2);
   } else {
-    NewLIR2(cu, kX86UcomisdRR, src_reg1, src_reg2);
+    NewLIR2(kX86UcomisdRR, src_reg1, src_reg2);
   }
   LIR* branch = NULL;
   if (unordered_gt) {
-    branch = NewLIR2(cu, kX86Jcc8, 0, kX86CondPE);
+    branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
   }
   // If the result reg can't be byte accessed, use a jump and move instead of a set.
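   // (Only eax-edx have byte-addressable subregisters on 32-bit x86, hence the
   // reg >= 4 test.)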
   if (rl_result.low_reg >= 4) {
     LIR* branch2 = NULL;
     if (unordered_gt) {
-      branch2 = NewLIR2(cu, kX86Jcc8, 0, kX86CondA);
-      NewLIR2(cu, kX86Mov32RI, rl_result.low_reg, 0x0);
+      branch2 = NewLIR2(kX86Jcc8, 0, kX86CondA);
+      NewLIR2(kX86Mov32RI, rl_result.low_reg, 0x0);
     } else {
-      branch2 = NewLIR2(cu, kX86Jcc8, 0, kX86CondBe);
-      NewLIR2(cu, kX86Mov32RI, rl_result.low_reg, 0x1);
+      branch2 = NewLIR2(kX86Jcc8, 0, kX86CondBe);
+      NewLIR2(kX86Mov32RI, rl_result.low_reg, 0x1);
     }
-    branch2->target = NewLIR0(cu, kPseudoTargetLabel);
+    branch2->target = NewLIR0(kPseudoTargetLabel);
   } else {
-    NewLIR2(cu, kX86Set8R, rl_result.low_reg, kX86CondA /* above - unsigned > */);
+    NewLIR2(kX86Set8R, rl_result.low_reg, kX86CondA /* above - unsigned > */);
   }
-  NewLIR2(cu, kX86Sbb32RI, rl_result.low_reg, 0);
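+  // The set/move above left 1 for "above" and 0 otherwise; sbb then subtracts
+  // CF (set by ucomis when src1 < src2 or unordered), giving -1 for less-than.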
+  NewLIR2(kX86Sbb32RI, rl_result.low_reg, 0);
   if (unordered_gt) {
-    branch->target = NewLIR0(cu, kPseudoTargetLabel);
+    branch->target = NewLIR0(kPseudoTargetLabel);
   }
-  StoreValue(cu, rl_dest, rl_result);
+  StoreValue(rl_dest, rl_result);
 }
 
-void X86Codegen::GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir, bool gt_bias,
+void X86Mir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
                                      bool is_double) {
-  LIR* label_list = cu->block_label_list;
-  LIR* taken = &label_list[bb->taken->id];
-  LIR* not_taken = &label_list[bb->fall_through->id];
+  LIR* taken = &block_label_list_[bb->taken->id];
+  LIR* not_taken = &block_label_list_[bb->fall_through->id];
   LIR* branch = NULL;
   RegLocation rl_src1;
   RegLocation rl_src2;
   if (is_double) {
-    rl_src1 = GetSrcWide(cu, mir, 0);
-    rl_src2 = GetSrcWide(cu, mir, 2);
-    rl_src1 = LoadValueWide(cu, rl_src1, kFPReg);
-    rl_src2 = LoadValueWide(cu, rl_src2, kFPReg);
-    NewLIR2(cu, kX86UcomisdRR, S2d(rl_src1.low_reg, rl_src1.high_reg),
+    rl_src1 = mir_graph_->GetSrcWide(mir, 0);
+    rl_src2 = mir_graph_->GetSrcWide(mir, 2);
+    rl_src1 = LoadValueWide(rl_src1, kFPReg);
+    rl_src2 = LoadValueWide(rl_src2, kFPReg);
+    NewLIR2(kX86UcomisdRR, S2d(rl_src1.low_reg, rl_src1.high_reg),
             S2d(rl_src2.low_reg, rl_src2.high_reg));
   } else {
-    rl_src1 = GetSrc(cu, mir, 0);
-    rl_src2 = GetSrc(cu, mir, 1);
-    rl_src1 = LoadValue(cu, rl_src1, kFPReg);
-    rl_src2 = LoadValue(cu, rl_src2, kFPReg);
-    NewLIR2(cu, kX86UcomissRR, rl_src1.low_reg, rl_src2.low_reg);
+    rl_src1 = mir_graph_->GetSrc(mir, 0);
+    rl_src2 = mir_graph_->GetSrc(mir, 1);
+    rl_src1 = LoadValue(rl_src1, kFPReg);
+    rl_src2 = LoadValue(rl_src2, kFPReg);
+    NewLIR2(kX86UcomissRR, rl_src1.low_reg, rl_src2.low_reg);
   }
   ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
   switch (ccode) {
     case kCondEq:
       if (!gt_bias) {
-        branch = NewLIR2(cu, kX86Jcc8, 0, kX86CondPE);
+        branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
         branch->target = not_taken;
       }
       break;
     case kCondNe:
       if (!gt_bias) {
-        branch = NewLIR2(cu, kX86Jcc8, 0, kX86CondPE);
+        branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
         branch->target = taken;
       }
       break;
     case kCondLt:
       if (gt_bias) {
-        branch = NewLIR2(cu, kX86Jcc8, 0, kX86CondPE);
+        branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
         branch->target = not_taken;
       }
       ccode = kCondCs;
       break;
     case kCondLe:
       if (gt_bias) {
-        branch = NewLIR2(cu, kX86Jcc8, 0, kX86CondPE);
+        branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
         branch->target = not_taken;
       }
       ccode = kCondLs;
       break;
     case kCondGt:
       if (gt_bias) {
-        branch = NewLIR2(cu, kX86Jcc8, 0, kX86CondPE);
+        branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
         branch->target = taken;
       }
       ccode = kCondHi;
       break;
     case kCondGe:
       if (gt_bias) {
-        branch = NewLIR2(cu, kX86Jcc8, 0, kX86CondPE);
+        branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
         branch->target = taken;
       }
       ccode = kCondCc;
@@ -348,30 +345,30 @@
     default:
       LOG(FATAL) << "Unexpected ccode: " << ccode;
   }
-  OpCondBranch(cu, ccode, taken);
+  OpCondBranch(ccode, taken);
 }
 
-void X86Codegen::GenNegFloat(CompilationUnit *cu, RegLocation rl_dest, RegLocation rl_src)
+void X86Mir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src)
 {
   RegLocation rl_result;
-  rl_src = LoadValue(cu, rl_src, kCoreReg);
-  rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-  OpRegRegImm(cu, kOpAdd, rl_result.low_reg, rl_src.low_reg, 0x80000000);
-  StoreValue(cu, rl_dest, rl_result);
+  rl_src = LoadValue(rl_src, kCoreReg);
+  rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  OpRegRegImm(kOpAdd, rl_result.low_reg, rl_src.low_reg, 0x80000000);
+  StoreValue(rl_dest, rl_result);
 }
 
-void X86Codegen::GenNegDouble(CompilationUnit *cu, RegLocation rl_dest, RegLocation rl_src)
+void X86Mir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src)
 {
   RegLocation rl_result;
-  rl_src = LoadValueWide(cu, rl_src, kCoreReg);
-  rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-  OpRegRegImm(cu, kOpAdd, rl_result.high_reg, rl_src.high_reg, 0x80000000);
-  OpRegCopy(cu, rl_result.low_reg, rl_src.low_reg);
-  StoreValueWide(cu, rl_dest, rl_result);
+  rl_src = LoadValueWide(rl_src, kCoreReg);
+  rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  OpRegRegImm(kOpAdd, rl_result.high_reg, rl_src.high_reg, 0x80000000);
+  OpRegCopy(rl_result.low_reg, rl_src.low_reg);
+  StoreValueWide(rl_dest, rl_result);
 }
 
-bool X86Codegen::GenInlinedSqrt(CompilationUnit* cu, CallInfo* info) {
-  DCHECK_NE(cu->instruction_set, kThumb2);
+bool X86Mir2Lir::GenInlinedSqrt(CallInfo* info) {
+  DCHECK_NE(cu_->instruction_set, kThumb2);
   return false;
 }
 
diff --git a/src/compiler/dex/quick/x86/int_x86.cc b/src/compiler/dex/quick/x86/int_x86.cc
index 984eaef..9c72ad9 100644
--- a/src/compiler/dex/quick/x86/int_x86.cc
+++ b/src/compiler/dex/quick/x86/int_x86.cc
@@ -17,8 +17,6 @@
 /* This file contains codegen for the X86 ISA */
 
 #include "codegen_x86.h"
-#include "compiler/dex/quick/codegen_util.h"
-#include "compiler/dex/quick/ralloc_util.h"
 #include "x86_lir.h"
 
 namespace art {
@@ -26,15 +24,15 @@
 /*
  * Compare a register against a memory operand and branch to a throw launchpad on c_code.
  */
-LIR* X86Codegen::GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code,
+LIR* X86Mir2Lir::GenRegMemCheck(ConditionCode c_code,
                                 int reg1, int base, int offset, ThrowKind kind)
 {
-  LIR* tgt = RawLIR(cu, 0, kPseudoThrowTarget, kind,
-                    cu->current_dalvik_offset, reg1, base, offset);
-  OpRegMem(cu, kOpCmp, reg1, base, offset);
-  LIR* branch = OpCondBranch(cu, c_code, tgt);
+  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind,
+                    current_dalvik_offset_, reg1, base, offset);
+  OpRegMem(kOpCmp, reg1, base, offset);
+  LIR* branch = OpCondBranch(c_code, tgt);
   // Remember branch target - will be processed later
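   // (throw launchpads are assembled after the main instruction stream)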
-  InsertGrowableList(cu, &cu->throw_launchpads, reinterpret_cast<uintptr_t>(tgt));
+  InsertGrowableList(cu_, &throw_launchpads_, reinterpret_cast<uintptr_t>(tgt));
   return branch;
 }
 
@@ -44,25 +42,25 @@
  *    x < y     return -1
  *    x > y     return  1
  */
-void X86Codegen::GenCmpLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+void X86Mir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2)
 {
-  FlushAllRegs(cu);
-  LockCallTemps(cu);  // Prepare for explicit register usage
-  LoadValueDirectWideFixed(cu, rl_src1, r0, r1);
-  LoadValueDirectWideFixed(cu, rl_src2, r2, r3);
+  FlushAllRegs();
+  LockCallTemps();  // Prepare for explicit register usage
+  LoadValueDirectWideFixed(rl_src1, r0, r1);
+  LoadValueDirectWideFixed(rl_src2, r2, r3);
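+  // Branchless result: setl/neg turns "less" into -1, setnz turns "not equal"
+  // into 1, and OR-ing the two yields {-1, 0, 1}.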
   // Compute (r1:r0) = (r1:r0) - (r3:r2)
-  OpRegReg(cu, kOpSub, r0, r2);  // r0 = r0 - r2
-  OpRegReg(cu, kOpSbc, r1, r3);  // r1 = r1 - r3 - CF
-  NewLIR2(cu, kX86Set8R, r2, kX86CondL);  // r2 = (r1:r0) < (r3:r2) ? 1 : 0
-  NewLIR2(cu, kX86Movzx8RR, r2, r2);
-  OpReg(cu, kOpNeg, r2);         // r2 = -r2
-  OpRegReg(cu, kOpOr, r0, r1);   // r0 = high | low - sets ZF
-  NewLIR2(cu, kX86Set8R, r0, kX86CondNz);  // r0 = (r1:r0) != (r3:r2) ? 1 : 0
-  NewLIR2(cu, kX86Movzx8RR, r0, r0);
-  OpRegReg(cu, kOpOr, r0, r2);   // r0 = r0 | r2
+  OpRegReg(kOpSub, r0, r2);  // r0 = r0 - r2
+  OpRegReg(kOpSbc, r1, r3);  // r1 = r1 - r3 - CF
+  NewLIR2(kX86Set8R, r2, kX86CondL);  // r2 = (r1:r0) < (r3:r2) ? 1 : 0
+  NewLIR2(kX86Movzx8RR, r2, r2);
+  OpReg(kOpNeg, r2);         // r2 = -r2
+  OpRegReg(kOpOr, r0, r1);   // r0 = high | low - sets ZF
+  NewLIR2(kX86Set8R, r0, kX86CondNz);  // r0 = (r1:r0) != (r3:r2) ? 1 : 0
+  NewLIR2(kX86Movzx8RR, r0, r0);
+  OpRegReg(kOpOr, r0, r2);   // r0 = r0 | r2
   RegLocation rl_result = LocCReturn();
-  StoreValue(cu, rl_dest, rl_result);
+  StoreValue(rl_dest, rl_result);
 }
 
 X86ConditionCode X86ConditionEncoding(ConditionCode cond) {
@@ -87,37 +85,37 @@
   return kX86CondO;
 }
 
-LIR* X86Codegen::OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1, int src2,
+LIR* X86Mir2Lir::OpCmpBranch(ConditionCode cond, int src1, int src2,
                              LIR* target)
 {
-  NewLIR2(cu, kX86Cmp32RR, src1, src2);
+  NewLIR2(kX86Cmp32RR, src1, src2);
   X86ConditionCode cc = X86ConditionEncoding(cond);
-  LIR* branch = NewLIR2(cu, kX86Jcc8, 0 /* lir operand for Jcc offset */ ,
+  LIR* branch = NewLIR2(kX86Jcc8, 0 /* lir operand for Jcc offset */ ,
                         cc);
   branch->target = target;
   return branch;
 }
 
-LIR* X86Codegen::OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg,
+LIR* X86Mir2Lir::OpCmpImmBranch(ConditionCode cond, int reg,
                                 int check_value, LIR* target)
 {
   if ((check_value == 0) && (cond == kCondEq || cond == kCondNe)) {
     // TODO: when check_value == 0 and reg is rCX, use the jcxz/nz opcode
-    NewLIR2(cu, kX86Test32RR, reg, reg);
+    NewLIR2(kX86Test32RR, reg, reg);
   } else {
-    NewLIR2(cu, IS_SIMM8(check_value) ? kX86Cmp32RI8 : kX86Cmp32RI, reg, check_value);
+    NewLIR2(IS_SIMM8(check_value) ? kX86Cmp32RI8 : kX86Cmp32RI, reg, check_value);
   }
   X86ConditionCode cc = X86ConditionEncoding(cond);
-  LIR* branch = NewLIR2(cu, kX86Jcc8, 0 /* lir operand for Jcc offset */ , cc);
+  LIR* branch = NewLIR2(kX86Jcc8, 0 /* lir operand for Jcc offset */ , cc);
   branch->target = target;
   return branch;
 }
 
-LIR* X86Codegen::OpRegCopyNoInsert(CompilationUnit *cu, int r_dest, int r_src)
+LIR* X86Mir2Lir::OpRegCopyNoInsert(int r_dest, int r_src)
 {
   if (X86_FPREG(r_dest) || X86_FPREG(r_src))
-    return OpFpRegCopy(cu, r_dest, r_src);
-  LIR* res = RawLIR(cu, cu->current_dalvik_offset, kX86Mov32RR,
+    return OpFpRegCopy(r_dest, r_src);
+  LIR* res = RawLIR(current_dalvik_offset_, kX86Mov32RR,
                     r_dest, r_src);
   if (r_dest == r_src) {
     res->flags.is_nop = true;
@@ -125,14 +123,14 @@
   return res;
 }
 
-LIR* X86Codegen::OpRegCopy(CompilationUnit *cu, int r_dest, int r_src)
+LIR* X86Mir2Lir::OpRegCopy(int r_dest, int r_src)
 {
-  LIR *res = OpRegCopyNoInsert(cu, r_dest, r_src);
-  AppendLIR(cu, res);
+  LIR *res = OpRegCopyNoInsert(r_dest, r_src);
+  AppendLIR(res);
   return res;
 }
 
-void X86Codegen::OpRegCopyWide(CompilationUnit *cu, int dest_lo, int dest_hi,
+void X86Mir2Lir::OpRegCopyWide(int dest_lo, int dest_hi,
                                int src_lo, int src_hi)
 {
   bool dest_fp = X86_FPREG(dest_lo) && X86_FPREG(dest_hi);
@@ -141,62 +139,61 @@
   assert(X86_FPREG(dest_lo) == X86_FPREG(dest_hi));
   if (dest_fp) {
     if (src_fp) {
-      OpRegCopy(cu, S2d(dest_lo, dest_hi), S2d(src_lo, src_hi));
+      OpRegCopy(S2d(dest_lo, dest_hi), S2d(src_lo, src_hi));
     } else {
       // TODO: Prevent this from happening in the code. The result is often
       // unused or could have been loaded more easily from memory.
-      NewLIR2(cu, kX86MovdxrRR, dest_lo, src_lo);
-      NewLIR2(cu, kX86MovdxrRR, dest_hi, src_hi);
-      NewLIR2(cu, kX86PsllqRI, dest_hi, 32);
-      NewLIR2(cu, kX86OrpsRR, dest_lo, dest_hi);
+      NewLIR2(kX86MovdxrRR, dest_lo, src_lo);
+      NewLIR2(kX86MovdxrRR, dest_hi, src_hi);
+      NewLIR2(kX86PsllqRI, dest_hi, 32);
+      NewLIR2(kX86OrpsRR, dest_lo, dest_hi);
     }
   } else {
     if (src_fp) {
-      NewLIR2(cu, kX86MovdrxRR, dest_lo, src_lo);
-      NewLIR2(cu, kX86PsrlqRI, src_lo, 32);
-      NewLIR2(cu, kX86MovdrxRR, dest_hi, src_lo);
+      NewLIR2(kX86MovdrxRR, dest_lo, src_lo);
+      NewLIR2(kX86PsrlqRI, src_lo, 32);
+      NewLIR2(kX86MovdrxRR, dest_hi, src_lo);
     } else {
       // Handle overlap
       if (src_hi == dest_lo) {
-        OpRegCopy(cu, dest_hi, src_hi);
-        OpRegCopy(cu, dest_lo, src_lo);
+        OpRegCopy(dest_hi, src_hi);
+        OpRegCopy(dest_lo, src_lo);
       } else {
-        OpRegCopy(cu, dest_lo, src_lo);
-        OpRegCopy(cu, dest_hi, src_hi);
+        OpRegCopy(dest_lo, src_lo);
+        OpRegCopy(dest_hi, src_hi);
       }
     }
   }
 }
 
-void X86Codegen::GenSelect(CompilationUnit* cu, BasicBlock* bb, MIR* mir)
+void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir)
 {
   UNIMPLEMENTED(FATAL) << "Need codegen for GenSelect";
 }
 
-void X86Codegen::GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir) {
-  LIR* label_list = cu->block_label_list;
-  LIR* taken = &label_list[bb->taken->id];
-  RegLocation rl_src1 = GetSrcWide(cu, mir, 0);
-  RegLocation rl_src2 = GetSrcWide(cu, mir, 2);
-  FlushAllRegs(cu);
-  LockCallTemps(cu);  // Prepare for explicit register usage
-  LoadValueDirectWideFixed(cu, rl_src1, r0, r1);
-  LoadValueDirectWideFixed(cu, rl_src2, r2, r3);
+void X86Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
+  LIR* taken = &block_label_list_[bb->taken->id];
+  RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
+  RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
+  FlushAllRegs();
+  LockCallTemps();  // Prepare for explicit register usage
+  LoadValueDirectWideFixed(rl_src1, r0, r1);
+  LoadValueDirectWideFixed(rl_src2, r2, r3);
   ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
   // Swap operands and condition code to prevent use of zero flag.
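   // (ZF after the sub/sbc pair reflects only the high word, so le/gt cannot
   // be tested directly; reversing the subtraction maps them onto ge/lt,
   // which need only SF and OF.)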
   if (ccode == kCondLe || ccode == kCondGt) {
     // Compute (r3:r2) = (r3:r2) - (r1:r0)
-    OpRegReg(cu, kOpSub, r2, r0);  // r2 = r2 - r0
-    OpRegReg(cu, kOpSbc, r3, r1);  // r3 = r3 - r1 - CF
+    OpRegReg(kOpSub, r2, r0);  // r2 = r2 - r0
+    OpRegReg(kOpSbc, r3, r1);  // r3 = r3 - r1 - CF
   } else {
     // Compute (r1:r0) = (r1:r0) - (r3:r2)
-    OpRegReg(cu, kOpSub, r0, r2);  // r0 = r0 - r2
-    OpRegReg(cu, kOpSbc, r1, r3);  // r1 = r1 - r3 - CF
+    OpRegReg(kOpSub, r0, r2);  // r0 = r0 - r2
+    OpRegReg(kOpSbc, r1, r3);  // r1 = r1 - r3 - CF
   }
   switch (ccode) {
     case kCondEq:
     case kCondNe:
-      OpRegReg(cu, kOpOr, r0, r1);  // r0 = r0 | r1
+      OpRegReg(kOpOr, r0, r1);  // r0 = r0 | r1
       break;
     case kCondLe:
       ccode = kCondGe;
@@ -210,229 +207,229 @@
     default:
       LOG(FATAL) << "Unexpected ccode: " << ccode;
   }
-  OpCondBranch(cu, ccode, taken);
+  OpCondBranch(ccode, taken);
 }
 
-RegLocation X86Codegen::GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg_lo,
+RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, int reg_lo,
                                      int lit, bool is_div)
 {
   LOG(FATAL) << "Unexpected use of GenDivRemLit for x86";
   return rl_dest;
 }
 
-RegLocation X86Codegen::GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg_lo,
+RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, int reg_lo,
                                   int reg_hi, bool is_div)
 {
   LOG(FATAL) << "Unexpected use of GenDivRem for x86";
   return rl_dest;
 }
 
-bool X86Codegen::GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min)
+bool X86Mir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min)
 {
-  DCHECK_EQ(cu->instruction_set, kX86);
+  DCHECK_EQ(cu_->instruction_set, kX86);
   RegLocation rl_src1 = info->args[0];
   RegLocation rl_src2 = info->args[1];
-  rl_src1 = LoadValue(cu, rl_src1, kCoreReg);
-  rl_src2 = LoadValue(cu, rl_src2, kCoreReg);
-  RegLocation rl_dest = InlineTarget(cu, info);
-  RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
-  OpRegReg(cu, kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
-  DCHECK_EQ(cu->instruction_set, kX86);
-  LIR* branch = NewLIR2(cu, kX86Jcc8, 0, is_min ? kX86CondG : kX86CondL);
-  OpRegReg(cu, kOpMov, rl_result.low_reg, rl_src1.low_reg);
-  LIR* branch2 = NewLIR1(cu, kX86Jmp8, 0);
-  branch->target = NewLIR0(cu, kPseudoTargetLabel);
-  OpRegReg(cu, kOpMov, rl_result.low_reg, rl_src2.low_reg);
-  branch2->target = NewLIR0(cu, kPseudoTargetLabel);
-  StoreValue(cu, rl_dest, rl_result);
+  rl_src1 = LoadValue(rl_src1, kCoreReg);
+  rl_src2 = LoadValue(rl_src2, kCoreReg);
+  RegLocation rl_dest = InlineTarget(info);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  OpRegReg(kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
+  DCHECK_EQ(cu_->instruction_set, kX86);
+  LIR* branch = NewLIR2(kX86Jcc8, 0, is_min ? kX86CondG : kX86CondL);
+  OpRegReg(kOpMov, rl_result.low_reg, rl_src1.low_reg);
+  LIR* branch2 = NewLIR1(kX86Jmp8, 0);
+  branch->target = NewLIR0(kPseudoTargetLabel);
+  OpRegReg(kOpMov, rl_result.low_reg, rl_src2.low_reg);
+  branch2->target = NewLIR0(kPseudoTargetLabel);
+  StoreValue(rl_dest, rl_result);
   return true;
 }
 
-void X86Codegen::OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale, int offset)
+void X86Mir2Lir::OpLea(int rBase, int reg1, int reg2, int scale, int offset)
 {
-  NewLIR5(cu, kX86Lea32RA, rBase, reg1, reg2, scale, offset);
+  NewLIR5(kX86Lea32RA, rBase, reg1, reg2, scale, offset);
 }
 
-void X86Codegen::OpTlsCmp(CompilationUnit* cu, int offset, int val)
+void X86Mir2Lir::OpTlsCmp(int offset, int val)
 {
-  NewLIR2(cu, kX86Cmp16TI8, offset, val);
+  NewLIR2(kX86Cmp16TI8, offset, val);
 }
 
-bool X86Codegen::GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier) {
-  DCHECK_NE(cu->instruction_set, kThumb2);
+bool X86Mir2Lir::GenInlinedCas32(CallInfo* info, bool need_write_barrier) {
+  DCHECK_NE(cu_->instruction_set, kThumb2);
   return false;
 }
 
-LIR* X86Codegen::OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target) {
+LIR* X86Mir2Lir::OpPcRelLoad(int reg, LIR* target) {
   LOG(FATAL) << "Unexpected use of OpPcRelLoad for x86";
   return NULL;
 }
 
-LIR* X86Codegen::OpVldm(CompilationUnit* cu, int rBase, int count)
+LIR* X86Mir2Lir::OpVldm(int rBase, int count)
 {
   LOG(FATAL) << "Unexpected use of OpVldm for x86";
   return NULL;
 }
 
-LIR* X86Codegen::OpVstm(CompilationUnit* cu, int rBase, int count)
+LIR* X86Mir2Lir::OpVstm(int rBase, int count)
 {
   LOG(FATAL) << "Unexpected use of OpVstm for x86";
   return NULL;
 }
 
-void X86Codegen::GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
+void X86Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                                RegLocation rl_result, int lit,
                                                int first_bit, int second_bit)
 {
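   // lit is expected to have exactly two set bits, 2^first_bit + 2^second_bit,
   // so x * lit == ((x << (second_bit - first_bit)) + x) << first_bit.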
-  int t_reg = AllocTemp(cu);
-  OpRegRegImm(cu, kOpLsl, t_reg, rl_src.low_reg, second_bit - first_bit);
-  OpRegRegReg(cu, kOpAdd, rl_result.low_reg, rl_src.low_reg, t_reg);
-  FreeTemp(cu, t_reg);
+  int t_reg = AllocTemp();
+  OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, second_bit - first_bit);
+  OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, t_reg);
+  FreeTemp(t_reg);
   if (first_bit != 0) {
-    OpRegRegImm(cu, kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
+    OpRegRegImm(kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
   }
 }
 
-void X86Codegen::GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi)
+void X86Mir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi)
 {
-  int t_reg = AllocTemp(cu);
-  OpRegRegReg(cu, kOpOr, t_reg, reg_lo, reg_hi);
-  GenImmedCheck(cu, kCondEq, t_reg, 0, kThrowDivZero);
-  FreeTemp(cu, t_reg);
+  int t_reg = AllocTemp();
+  OpRegRegReg(kOpOr, t_reg, reg_lo, reg_hi);
+  GenImmedCheck(kCondEq, t_reg, 0, kThrowDivZero);
+  FreeTemp(t_reg);
 }
 
 // Test suspend flag, return target of taken suspend branch
-LIR* X86Codegen::OpTestSuspend(CompilationUnit* cu, LIR* target)
+LIR* X86Mir2Lir::OpTestSuspend(LIR* target)
 {
-  OpTlsCmp(cu, Thread::ThreadFlagsOffset().Int32Value(), 0);
-  return OpCondBranch(cu, (target == NULL) ? kCondNe : kCondEq, target);
+  OpTlsCmp(Thread::ThreadFlagsOffset().Int32Value(), 0);
+  return OpCondBranch((target == NULL) ? kCondNe : kCondEq, target);
 }
 
 // Decrement register and branch on condition
-LIR* X86Codegen::OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg, LIR* target)
+LIR* X86Mir2Lir::OpDecAndBranch(ConditionCode c_code, int reg, LIR* target)
 {
-  OpRegImm(cu, kOpSub, reg, 1);
-  return OpCmpImmBranch(cu, c_code, reg, 0, target);
+  OpRegImm(kOpSub, reg, 1);
+  return OpCmpImmBranch(c_code, reg, 0, target);
 }
 
-bool X86Codegen::SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
+bool X86Mir2Lir::SmallLiteralDivide(Instruction::Code dalvik_opcode,
                                     RegLocation rl_src, RegLocation rl_dest, int lit)
 {
   LOG(FATAL) << "Unexpected use of smallLiteralDive in x86";
   return false;
 }
 
-LIR* X86Codegen::OpIT(CompilationUnit* cu, ConditionCode cond, const char* guide)
+LIR* X86Mir2Lir::OpIT(ConditionCode cond, const char* guide)
 {
   LOG(FATAL) << "Unexpected use of OpIT in x86";
   return NULL;
 }
 
-void X86Codegen::GenMulLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+void X86Mir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2)
 {
   LOG(FATAL) << "Unexpected use of GenX86Long for x86";
 }
-void X86Codegen::GenAddLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+void X86Mir2Lir::GenAddLong(RegLocation rl_dest, RegLocation rl_src1,
                          RegLocation rl_src2)
 {
   // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart
   // enough.
-  FlushAllRegs(cu);
-  LockCallTemps(cu);  // Prepare for explicit register usage
-  LoadValueDirectWideFixed(cu, rl_src1, r0, r1);
-  LoadValueDirectWideFixed(cu, rl_src2, r2, r3);
+  FlushAllRegs();
+  LockCallTemps();  // Prepare for explicit register usage
+  LoadValueDirectWideFixed(rl_src1, r0, r1);
+  LoadValueDirectWideFixed(rl_src2, r2, r3);
   // Compute (r1:r0) = (r1:r0) + (r3:r2)
-  OpRegReg(cu, kOpAdd, r0, r2);  // r0 = r0 + r2
-  OpRegReg(cu, kOpAdc, r1, r3);  // r1 = r1 + r3 + CF
+  OpRegReg(kOpAdd, r0, r2);  // r0 = r0 + r2
+  OpRegReg(kOpAdc, r1, r3);  // r1 = r1 + r3 + CF
   RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
                           INVALID_SREG, INVALID_SREG};
-  StoreValueWide(cu, rl_dest, rl_result);
+  StoreValueWide(rl_dest, rl_result);
 }
 
-void X86Codegen::GenSubLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+void X86Mir2Lir::GenSubLong(RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2)
 {
   // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart
   // enough.
-  FlushAllRegs(cu);
-  LockCallTemps(cu);  // Prepare for explicit register usage
-  LoadValueDirectWideFixed(cu, rl_src1, r0, r1);
-  LoadValueDirectWideFixed(cu, rl_src2, r2, r3);
+  FlushAllRegs();
+  LockCallTemps();  // Prepare for explicit register usage
+  LoadValueDirectWideFixed(rl_src1, r0, r1);
+  LoadValueDirectWideFixed(rl_src2, r2, r3);
   // Compute (r1:r0) = (r1:r0) - (r3:r2)
-  OpRegReg(cu, kOpSub, r0, r2);  // r0 = r0 - r2
-  OpRegReg(cu, kOpSbc, r1, r3);  // r1 = r1 - r3 - CF
+  OpRegReg(kOpSub, r0, r2);  // r0 = r0 - r2
+  OpRegReg(kOpSbc, r1, r3);  // r1 = r1 - r3 - CF
   RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
                           INVALID_SREG, INVALID_SREG};
-  StoreValueWide(cu, rl_dest, rl_result);
+  StoreValueWide(rl_dest, rl_result);
 }
 
-void X86Codegen::GenAndLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+void X86Mir2Lir::GenAndLong(RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2)
 {
   // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart
   // enough.
-  FlushAllRegs(cu);
-  LockCallTemps(cu);  // Prepare for explicit register usage
-  LoadValueDirectWideFixed(cu, rl_src1, r0, r1);
-  LoadValueDirectWideFixed(cu, rl_src2, r2, r3);
+  FlushAllRegs();
+  LockCallTemps();  // Prepare for explicit register usage
+  LoadValueDirectWideFixed(rl_src1, r0, r1);
+  LoadValueDirectWideFixed(rl_src2, r2, r3);
   // Compute (r1:r0) = (r1:r0) & (r3:r2)
-  OpRegReg(cu, kOpAnd, r0, r2);  // r0 = r0 & r2
-  OpRegReg(cu, kOpAnd, r1, r3);  // r1 = r1 & r3
+  OpRegReg(kOpAnd, r0, r2);  // r0 = r0 & r2
+  OpRegReg(kOpAnd, r1, r3);  // r1 = r1 & r3
   RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
                           INVALID_SREG, INVALID_SREG};
-  StoreValueWide(cu, rl_dest, rl_result);
+  StoreValueWide(rl_dest, rl_result);
 }
 
-void X86Codegen::GenOrLong(CompilationUnit* cu, RegLocation rl_dest,
+void X86Mir2Lir::GenOrLong(RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2)
 {
   // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart
   // enough.
-  FlushAllRegs(cu);
-  LockCallTemps(cu);  // Prepare for explicit register usage
-  LoadValueDirectWideFixed(cu, rl_src1, r0, r1);
-  LoadValueDirectWideFixed(cu, rl_src2, r2, r3);
+  FlushAllRegs();
+  LockCallTemps();  // Prepare for explicit register usage
+  LoadValueDirectWideFixed(rl_src1, r0, r1);
+  LoadValueDirectWideFixed(rl_src2, r2, r3);
   // Compute (r1:r0) = (r1:r0) | (r3:r2)
-  OpRegReg(cu, kOpOr, r0, r2);  // r0 = r0 | r2
-  OpRegReg(cu, kOpOr, r1, r3);  // r1 = r1 | r3
+  OpRegReg(kOpOr, r0, r2);  // r0 = r0 | r2
+  OpRegReg(kOpOr, r1, r3);  // r1 = r1 | r3
   RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
                           INVALID_SREG, INVALID_SREG};
-  StoreValueWide(cu, rl_dest, rl_result);
+  StoreValueWide(rl_dest, rl_result);
 }
 
-void X86Codegen::GenXorLong(CompilationUnit* cu, RegLocation rl_dest,
+void X86Mir2Lir::GenXorLong(RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_src2)
 {
   // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart
   // enough.
-  FlushAllRegs(cu);
-  LockCallTemps(cu);  // Prepare for explicit register usage
-  LoadValueDirectWideFixed(cu, rl_src1, r0, r1);
-  LoadValueDirectWideFixed(cu, rl_src2, r2, r3);
+  FlushAllRegs();
+  LockCallTemps();  // Prepare for explicit register usage
+  LoadValueDirectWideFixed(rl_src1, r0, r1);
+  LoadValueDirectWideFixed(rl_src2, r2, r3);
   // Compute (r1:r0) = (r1:r0) ^ (r3:r2)
-  OpRegReg(cu, kOpXor, r0, r2);  // r0 = r0 ^ r2
-  OpRegReg(cu, kOpXor, r1, r3);  // r1 = r1 ^ r3
+  OpRegReg(kOpXor, r0, r2);  // r0 = r0 ^ r2
+  OpRegReg(kOpXor, r1, r3);  // r1 = r1 ^ r3
   RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
                           INVALID_SREG, INVALID_SREG};
-  StoreValueWide(cu, rl_dest, rl_result);
+  StoreValueWide(rl_dest, rl_result);
 }
 
-void X86Codegen::GenNegLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+void X86Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src)
 {
-  FlushAllRegs(cu);
-  LockCallTemps(cu);  // Prepare for explicit register usage
-  LoadValueDirectWideFixed(cu, rl_src, r0, r1);
+  FlushAllRegs();
+  LockCallTemps();  // Prepare for explicit register usage
+  LoadValueDirectWideFixed(rl_src, r0, r1);
   // Compute (r1:r0) = -(r1:r0)
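   // (pair negate: neg of the low word sets CF when it was nonzero; adc folds
   // that borrow into the high word before it too is negated)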
-  OpRegReg(cu, kOpNeg, r0, r0);  // r0 = -r0
-  OpRegImm(cu, kOpAdc, r1, 0);   // r1 = r1 + CF
-  OpRegReg(cu, kOpNeg, r1, r1);  // r1 = -r1
+  OpRegReg(kOpNeg, r0, r0);  // r0 = -r0
+  OpRegImm(kOpAdc, r1, 0);   // r1 = r1 + CF
+  OpRegReg(kOpNeg, r1, r1);  // r1 = -r1
   RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
                           INVALID_SREG, INVALID_SREG};
-  StoreValueWide(cu, rl_dest, rl_result);
+  StoreValueWide(rl_dest, rl_result);
 }
 
-void X86Codegen::OpRegThreadMem(CompilationUnit* cu, OpKind op, int r_dest, int thread_offset) {
+void X86Mir2Lir::OpRegThreadMem(OpKind op, int r_dest, int thread_offset) {
   X86OpCode opcode = kX86Bkpt;
   switch (op) {
   case kOpCmp: opcode = kX86Cmp32RT;  break;
@@ -441,21 +438,21 @@
     LOG(FATAL) << "Bad opcode: " << op;
     break;
   }
-  NewLIR2(cu, opcode, r_dest, thread_offset);
+  NewLIR2(opcode, r_dest, thread_offset);
 }
 
 /*
  * Generate array load
  */
-void X86Codegen::GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+void X86Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                           RegLocation rl_index, RegLocation rl_dest, int scale)
 {
   RegisterClass reg_class = oat_reg_class_by_size(size);
   int len_offset = mirror::Array::LengthOffset().Int32Value();
   int data_offset;
   RegLocation rl_result;
-  rl_array = LoadValue(cu, rl_array, kCoreReg);
-  rl_index = LoadValue(cu, rl_index, kCoreReg);
+  rl_array = LoadValue(rl_array, kCoreReg);
+  rl_index = LoadValue(rl_index, kCoreReg);
 
   if (size == kLong || size == kDouble) {
     data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
@@ -464,30 +461,30 @@
   }
 
   /* null object? */
-  GenNullCheck(cu, rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+  GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
 
   if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
     /* if (rl_index >= [rl_array + len_offset]) goto kThrowArrayBounds */
-    GenRegMemCheck(cu, kCondUge, rl_index.low_reg, rl_array.low_reg,
+    GenRegMemCheck(kCondUge, rl_index.low_reg, rl_array.low_reg,
                    len_offset, kThrowArrayBounds);
   }
   if ((size == kLong) || (size == kDouble)) {
-    int reg_addr = AllocTemp(cu);
-    OpLea(cu, reg_addr, rl_array.low_reg, rl_index.low_reg, scale, data_offset);
-    FreeTemp(cu, rl_array.low_reg);
-    FreeTemp(cu, rl_index.low_reg);
-    rl_result = EvalLoc(cu, rl_dest, reg_class, true);
-    LoadBaseIndexedDisp(cu, reg_addr, INVALID_REG, 0, 0, rl_result.low_reg,
+    int reg_addr = AllocTemp();
+    OpLea(reg_addr, rl_array.low_reg, rl_index.low_reg, scale, data_offset);
+    FreeTemp(rl_array.low_reg);
+    FreeTemp(rl_index.low_reg);
+    rl_result = EvalLoc(rl_dest, reg_class, true);
+    LoadBaseIndexedDisp(reg_addr, INVALID_REG, 0, 0, rl_result.low_reg,
                         rl_result.high_reg, size, INVALID_SREG);
-    StoreValueWide(cu, rl_dest, rl_result);
+    StoreValueWide(rl_dest, rl_result);
   } else {
-    rl_result = EvalLoc(cu, rl_dest, reg_class, true);
+    rl_result = EvalLoc(rl_dest, reg_class, true);
 
-    LoadBaseIndexedDisp(cu, rl_array.low_reg, rl_index.low_reg, scale,
+    LoadBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale,
                         data_offset, rl_result.low_reg, INVALID_REG, size,
                         INVALID_SREG);
 
-    StoreValue(cu, rl_dest, rl_result);
+    StoreValue(rl_dest, rl_result);
   }
 }
 
@@ -495,7 +492,7 @@
  * Generate array store
  *
  */
-void X86Codegen::GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+void X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                           RegLocation rl_index, RegLocation rl_src, int scale)
 {
   RegisterClass reg_class = oat_reg_class_by_size(size);
@@ -508,29 +505,29 @@
     data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
   }
 
-  rl_array = LoadValue(cu, rl_array, kCoreReg);
-  rl_index = LoadValue(cu, rl_index, kCoreReg);
+  rl_array = LoadValue(rl_array, kCoreReg);
+  rl_index = LoadValue(rl_index, kCoreReg);
 
   /* null object? */
-  GenNullCheck(cu, rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+  GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
 
   if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
     /* if (rl_index >= [rl_array + len_offset]) goto kThrowArrayBounds */
-    GenRegMemCheck(cu, kCondUge, rl_index.low_reg, rl_array.low_reg, len_offset, kThrowArrayBounds);
+    GenRegMemCheck(kCondUge, rl_index.low_reg, rl_array.low_reg, len_offset, kThrowArrayBounds);
   }
   if ((size == kLong) || (size == kDouble)) {
-    rl_src = LoadValueWide(cu, rl_src, reg_class);
+    rl_src = LoadValueWide(rl_src, reg_class);
   } else {
-    rl_src = LoadValue(cu, rl_src, reg_class);
+    rl_src = LoadValue(rl_src, reg_class);
   }
   // If the src reg can't be byte accessed, move it to a temp first.
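   // (same eax-edx byte-subregister restriction as in GenCmpFP)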
   if ((size == kSignedByte || size == kUnsignedByte) && rl_src.low_reg >= 4) {
-    int temp = AllocTemp(cu);
-    OpRegCopy(cu, temp, rl_src.low_reg);
-    StoreBaseIndexedDisp(cu, rl_array.low_reg, rl_index.low_reg, scale, data_offset, temp,
+    int temp = AllocTemp();
+    OpRegCopy(temp, rl_src.low_reg);
+    StoreBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale, data_offset, temp,
                          INVALID_REG, size, INVALID_SREG);
   } else {
-    StoreBaseIndexedDisp(cu, rl_array.low_reg, rl_index.low_reg, scale, data_offset, rl_src.low_reg,
+    StoreBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale, data_offset, rl_src.low_reg,
                          rl_src.high_reg, size, INVALID_SREG);
   }
 }
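The byte-store special case above encodes an ia32 rule: only registers 0-3
(eax, ecx, edx, ebx) have 8-bit subregisters, so a byte store from a
higher-numbered register must bounce through a low temp first. Illustrated
standalone, assuming the register ids mirror the hardware encoding:

#include <cstdio>

// Sketch only; assumes ids follow the ia32 encoding (eax=0 ... ebx=3).
// Registers >= 4 (esp/ebp/esi/edi) have no 8-bit form.
bool HasByteForm(int reg) { return reg >= 0 && reg < 4; }

int main() {
  for (int reg = 0; reg < 8; ++reg) {
    std::printf("r%d: %s\n", reg,
                HasByteForm(reg) ? "direct byte store"
                                 : "copy to a temp < 4 first");
  }
  return 0;
}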
@@ -539,69 +536,69 @@
  * Generate array store
  *
  */
-void X86Codegen::GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array,
+void X86Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array,
                              RegLocation rl_index, RegLocation rl_src, int scale)
 {
   int len_offset = mirror::Array::LengthOffset().Int32Value();
   int data_offset = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value();
 
-  FlushAllRegs(cu);  // Use explicit registers
-  LockCallTemps(cu);
+  FlushAllRegs();  // Use explicit registers
+  LockCallTemps();
 
   int r_value = TargetReg(kArg0);  // Register holding value
   int r_array_class = TargetReg(kArg1);  // Register holding array's Class
   int r_array = TargetReg(kArg2);  // Register holding array
   int r_index = TargetReg(kArg3);  // Register holding index into array
 
-  LoadValueDirectFixed(cu, rl_array, r_array);  // Grab array
-  LoadValueDirectFixed(cu, rl_src, r_value);  // Grab value
-  LoadValueDirectFixed(cu, rl_index, r_index);  // Grab index
+  LoadValueDirectFixed(rl_array, r_array);  // Grab array
+  LoadValueDirectFixed(rl_src, r_value);  // Grab value
+  LoadValueDirectFixed(rl_index, r_index);  // Grab index
 
-  GenNullCheck(cu, rl_array.s_reg_low, r_array, opt_flags);  // NPE?
+  GenNullCheck(rl_array.s_reg_low, r_array, opt_flags);  // NPE?
 
   // Store of null?
-  LIR* null_value_check = OpCmpImmBranch(cu, kCondEq, r_value, 0, NULL);
+  LIR* null_value_check = OpCmpImmBranch(kCondEq, r_value, 0, NULL);
 
   // Get the array's class.
-  LoadWordDisp(cu, r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
-  CallRuntimeHelperRegReg(cu, ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
+  LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
+  CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
                           r_array_class, true);
   // Redo LoadValues in case they didn't survive the call.
-  LoadValueDirectFixed(cu, rl_array, r_array);  // Reload array
-  LoadValueDirectFixed(cu, rl_index, r_index);  // Reload index
-  LoadValueDirectFixed(cu, rl_src, r_value);  // Reload value
+  LoadValueDirectFixed(rl_array, r_array);  // Reload array
+  LoadValueDirectFixed(rl_index, r_index);  // Reload index
+  LoadValueDirectFixed(rl_src, r_value);  // Reload value
   r_array_class = INVALID_REG;
 
   // Branch here if value to be stored == null
-  LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+  LIR* target = NewLIR0(kPseudoTargetLabel);
   null_value_check->target = target;
 
   // make an extra temp available for card mark below
-  FreeTemp(cu, TargetReg(kArg1));
+  FreeTemp(TargetReg(kArg1));
   if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
     /* if (rl_index >= [rl_array + len_offset]) goto kThrowArrayBounds */
-    GenRegMemCheck(cu, kCondUge, r_index, r_array, len_offset, kThrowArrayBounds);
+    GenRegMemCheck(kCondUge, r_index, r_array, len_offset, kThrowArrayBounds);
   }
-  StoreBaseIndexedDisp(cu, r_array, r_index, scale,
+  StoreBaseIndexedDisp(r_array, r_index, scale,
                        data_offset, r_value, INVALID_REG, kWord, INVALID_SREG);
-  FreeTemp(cu, r_index);
-  if (!cu->mir_graph->IsConstantNullRef(rl_src)) {
-    MarkGCCard(cu, r_value, r_array);
+  FreeTemp(r_index);
+  if (!mir_graph_->IsConstantNullRef(rl_src)) {
+    MarkGCCard(r_value, r_array);
   }
 }
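The closing IsConstantNullRef test above is the usual write-barrier
short-circuit: storing a reference dirties the card covering the object so the
GC rescans it, while a known-null store carries no pointer and can skip the
mark. A sketch, with the table layout and card size invented for illustration:

#include <cstdint>

// Card-mark sketch (not the ART card table; sizes are assumptions).
static uint8_t card_table[1 << 16];

void MarkCardSketch(uintptr_t obj_addr) {
  card_table[(obj_addr >> 7) & 0xFFFF] = 1;  // assumed 128-byte cards
}

void StoreRefSketch(uintptr_t obj_addr, uintptr_t value) {
  // ... the actual reference store is elided ...
  if (value != 0) {
    MarkCardSketch(obj_addr);  // mirrors the !IsConstantNullRef path
  }
}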
 
-void X86Codegen::GenShiftImmOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+void X86Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                    RegLocation rl_src1, RegLocation rl_shift)
 {
   // Default implementation is just to ignore the constant case.
-  GenShiftOpLong(cu, opcode, rl_dest, rl_src1, rl_shift);
+  GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift);
 }
 
-void X86Codegen::GenArithImmOpLong(CompilationUnit* cu, Instruction::Code opcode,
+void X86Mir2Lir::GenArithImmOpLong(Instruction::Code opcode,
                                    RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
 {
   // Default - bail to non-const handler.
-  GenArithOpLong(cu, opcode, rl_dest, rl_src1, rl_src2);
+  GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
 }
 
 }  // namespace art
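The whole of this file follows one mechanical transformation: each former free
function taking a CompilationUnit* becomes a method on the backend object,
which caches the pointers it needs as fields. Reduced to a sketch (invented
names, not the real class layout):

// Before: state threaded through every call.
//   LIR* OpReg(CompilationUnit* cu, OpKind op, int r) { AllocTemp(cu); ... }
// After: the receiver carries the state.
struct CompilationUnit;  // opaque for this sketch
struct MIRGraph;         // opaque for this sketch

class Mir2LirSketch {
 public:
  Mir2LirSketch(CompilationUnit* cu, MIRGraph* mir_graph)
      : cu_(cu), mir_graph_(mir_graph) {}
  int OpRegSketch(int r) { return AllocTemp() + r; }  // no cu argument needed
 private:
  int AllocTemp() { return 0; }  // placeholder; the real pool is a field
  CompilationUnit* const cu_;
  MIRGraph* const mir_graph_;
};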
diff --git a/src/compiler/dex/quick/x86/target_x86.cc b/src/compiler/dex/quick/x86/target_x86.cc
index ed07220..20074f1 100644
--- a/src/compiler/dex/quick/x86/target_x86.cc
+++ b/src/compiler/dex/quick/x86/target_x86.cc
@@ -15,9 +15,7 @@
  */
 
 #include "codegen_x86.h"
-#include "compiler/dex/quick/codegen_util.h"
 #include "compiler/dex/compiler_internals.h"
-#include "compiler/dex/quick/ralloc_util.h"
 #include "x86_lir.h"
 
 #include <string>
@@ -46,32 +44,32 @@
 #endif
 };
 
-RegLocation X86Codegen::LocCReturn()
+RegLocation X86Mir2Lir::LocCReturn()
 {
   RegLocation res = X86_LOC_C_RETURN;
   return res;
 }
 
-RegLocation X86Codegen::LocCReturnWide()
+RegLocation X86Mir2Lir::LocCReturnWide()
 {
   RegLocation res = X86_LOC_C_RETURN_WIDE;
   return res;
 }
 
-RegLocation X86Codegen::LocCReturnFloat()
+RegLocation X86Mir2Lir::LocCReturnFloat()
 {
   RegLocation res = X86_LOC_C_RETURN_FLOAT;
   return res;
 }
 
-RegLocation X86Codegen::LocCReturnDouble()
+RegLocation X86Mir2Lir::LocCReturnDouble()
 {
   RegLocation res = X86_LOC_C_RETURN_DOUBLE;
   return res;
 }
 
 // Return a target-dependent special register.
-int X86Codegen::TargetReg(SpecialTargetRegister reg) {
+int X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
   int res = INVALID_REG;
   switch (reg) {
     case kSelf: res = rX86_SELF; break;
@@ -96,19 +94,19 @@
 }
 
 // Create a double from a pair of singles.
-int X86Codegen::S2d(int low_reg, int high_reg)
+int X86Mir2Lir::S2d(int low_reg, int high_reg)
 {
   return X86_S2D(low_reg, high_reg);
 }
 
 // Return mask to strip off fp reg flags and bias.
-uint32_t X86Codegen::FpRegMask()
+uint32_t X86Mir2Lir::FpRegMask()
 {
   return X86_FP_REG_MASK;
 }
 
 // True if both regs single, both core or both double.
-bool X86Codegen::SameRegType(int reg1, int reg2)
+bool X86Mir2Lir::SameRegType(int reg1, int reg2)
 {
   return (X86_REGTYPE(reg1) == X86_REGTYPE(reg2));
 }
@@ -116,7 +114,7 @@
 /*
  * Decode the register id.
  */
-uint64_t X86Codegen::GetRegMaskCommon(CompilationUnit* cu, int reg)
+uint64_t X86Mir2Lir::GetRegMaskCommon(int reg)
 {
   uint64_t seed;
   int shift;
@@ -132,7 +130,7 @@
   return (seed << shift);
 }
 
-uint64_t X86Codegen::GetPCUseDefEncoding()
+uint64_t X86Mir2Lir::GetPCUseDefEncoding()
 {
   /*
    * FIXME: might make sense to use a virtual resource encoding bit for pc.  Might be
@@ -142,12 +140,12 @@
   return 0ULL;
 }
 
-void X86Codegen::SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir)
+void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir)
 {
-  DCHECK_EQ(cu->instruction_set, kX86);
+  DCHECK_EQ(cu_->instruction_set, kX86);
 
   // X86-specific resource map setup here.
-  uint64_t flags = X86Codegen::EncodingMap[lir->opcode].flags;
+  uint64_t flags = X86Mir2Lir::EncodingMap[lir->opcode].flags;
 
   if (flags & REG_USE_SP) {
     lir->use_mask |= ENCODE_X86_REG_SP;
@@ -158,22 +156,22 @@
   }
 
   if (flags & REG_DEFA) {
-    SetupRegMask(cu, &lir->def_mask, rAX);
+    SetupRegMask(&lir->def_mask, rAX);
   }
 
   if (flags & REG_DEFD) {
-    SetupRegMask(cu, &lir->def_mask, rDX);
+    SetupRegMask(&lir->def_mask, rDX);
   }
   if (flags & REG_USEA) {
-    SetupRegMask(cu, &lir->use_mask, rAX);
+    SetupRegMask(&lir->use_mask, rAX);
   }
 
   if (flags & REG_USEC) {
-    SetupRegMask(cu, &lir->use_mask, rCX);
+    SetupRegMask(&lir->use_mask, rCX);
   }
 
   if (flags & REG_USED) {
-    SetupRegMask(cu, &lir->use_mask, rDX);
+    SetupRegMask(&lir->use_mask, rDX);
   }
 }
 
@@ -206,7 +204,7 @@
  * Interpret a format string and build a string no longer than size
  * See format key in Assemble.cc.
  */
-std::string X86Codegen::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
+std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
   std::string buf;
   size_t i = 0;
   size_t fmt_len = strlen(fmt);
@@ -264,7 +262,7 @@
   return buf;
 }
 
-void X86Codegen::DumpResourceMask(LIR *x86LIR, uint64_t mask, const char *prefix)
+void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, uint64_t mask, const char *prefix)
 {
   char buf[256];
   buf[0] = 0;
@@ -306,10 +304,10 @@
   }
 }
 
-void X86Codegen::AdjustSpillMask(CompilationUnit* cu) {
+void X86Mir2Lir::AdjustSpillMask() {
   // Adjustment for LR spilling, x86 has no LR so nothing to do here
-  cu->core_spill_mask |= (1 << rRET);
-  cu->num_core_spills++;
+  core_spill_mask_ |= (1 << rRET);
+  num_core_spills_++;
 }
 
 /*
@@ -318,7 +316,7 @@
  * include any holes in the mask.  Associate holes with
  * Dalvik register INVALID_VREG (0xFFFFU).
  */
-void X86Codegen::MarkPreservedSingle(CompilationUnit* cu, int v_reg, int reg)
+void X86Mir2Lir::MarkPreservedSingle(int v_reg, int reg)
 {
   UNIMPLEMENTED(WARNING) << "MarkPreservedSingle";
 #if 0
@@ -326,10 +324,10 @@
 #endif
 }
 
-void X86Codegen::FlushRegWide(CompilationUnit* cu, int reg1, int reg2)
+void X86Mir2Lir::FlushRegWide(int reg1, int reg2)
 {
-  RegisterInfo* info1 = GetRegInfo(cu, reg1);
-  RegisterInfo* info2 = GetRegInfo(cu, reg2);
+  RegisterInfo* info1 = GetRegInfo(reg1);
+  RegisterInfo* info2 = GetRegInfo(reg2);
   DCHECK(info1 && info2 && info1->pair && info2->pair &&
          (info1->partner == info2->reg) &&
          (info2->partner == info1->reg));
@@ -341,93 +339,93 @@
 
     info1->dirty = false;
     info2->dirty = false;
-    if (cu->mir_graph->SRegToVReg(info2->s_reg) < cu->mir_graph->SRegToVReg(info1->s_reg))
+    if (mir_graph_->SRegToVReg(info2->s_reg) < mir_graph_->SRegToVReg(info1->s_reg))
       info1 = info2;
-    int v_reg = cu->mir_graph->SRegToVReg(info1->s_reg);
-    StoreBaseDispWide(cu, rX86_SP, VRegOffset(cu, v_reg), info1->reg, info1->partner);
+    int v_reg = mir_graph_->SRegToVReg(info1->s_reg);
+    StoreBaseDispWide(rX86_SP, VRegOffset(v_reg), info1->reg, info1->partner);
   }
 }
 
-void X86Codegen::FlushReg(CompilationUnit* cu, int reg)
+void X86Mir2Lir::FlushReg(int reg)
 {
-  RegisterInfo* info = GetRegInfo(cu, reg);
+  RegisterInfo* info = GetRegInfo(reg);
   if (info->live && info->dirty) {
     info->dirty = false;
-    int v_reg = cu->mir_graph->SRegToVReg(info->s_reg);
-    StoreBaseDisp(cu, rX86_SP, VRegOffset(cu, v_reg), reg, kWord);
+    int v_reg = mir_graph_->SRegToVReg(info->s_reg);
+    StoreBaseDisp(rX86_SP, VRegOffset(v_reg), reg, kWord);
   }
 }
 
 /* Give access to the target-dependent FP register encoding to common code */
-bool X86Codegen::IsFpReg(int reg) {
+bool X86Mir2Lir::IsFpReg(int reg) {
   return X86_FPREG(reg);
 }
 
 /* Clobber all regs that might be used by an external C call */
-void X86Codegen::ClobberCalleeSave(CompilationUnit *cu)
+void X86Mir2Lir::ClobberCalleeSave()
 {
-  Clobber(cu, rAX);
-  Clobber(cu, rCX);
-  Clobber(cu, rDX);
+  Clobber(rAX);
+  Clobber(rCX);
+  Clobber(rDX);
 }
 
-RegLocation X86Codegen::GetReturnWideAlt(CompilationUnit* cu) {
+RegLocation X86Mir2Lir::GetReturnWideAlt() {
   RegLocation res = LocCReturnWide();
   CHECK(res.low_reg == rAX);
   CHECK(res.high_reg == rDX);
-  Clobber(cu, rAX);
-  Clobber(cu, rDX);
-  MarkInUse(cu, rAX);
-  MarkInUse(cu, rDX);
-  MarkPair(cu, res.low_reg, res.high_reg);
+  Clobber(rAX);
+  Clobber(rDX);
+  MarkInUse(rAX);
+  MarkInUse(rDX);
+  MarkPair(res.low_reg, res.high_reg);
   return res;
 }
 
-RegLocation X86Codegen::GetReturnAlt(CompilationUnit* cu)
+RegLocation X86Mir2Lir::GetReturnAlt()
 {
   RegLocation res = LocCReturn();
   res.low_reg = rDX;
-  Clobber(cu, rDX);
-  MarkInUse(cu, rDX);
+  Clobber(rDX);
+  MarkInUse(rDX);
   return res;
 }
 
-RegisterInfo* X86Codegen::GetRegInfo(CompilationUnit* cu, int reg)
+X86Mir2Lir::RegisterInfo* X86Mir2Lir::GetRegInfo(int reg)
 {
-  return X86_FPREG(reg) ? &cu->reg_pool->FPRegs[reg & X86_FP_REG_MASK]
-                    : &cu->reg_pool->core_regs[reg];
+  return X86_FPREG(reg) ? &reg_pool_->FPRegs[reg & X86_FP_REG_MASK]
+                    : &reg_pool_->core_regs[reg];
 }
 
 /* To be used when explicitly managing register use */
-void X86Codegen::LockCallTemps(CompilationUnit* cu)
+void X86Mir2Lir::LockCallTemps()
 {
-  LockTemp(cu, rX86_ARG0);
-  LockTemp(cu, rX86_ARG1);
-  LockTemp(cu, rX86_ARG2);
-  LockTemp(cu, rX86_ARG3);
+  LockTemp(rX86_ARG0);
+  LockTemp(rX86_ARG1);
+  LockTemp(rX86_ARG2);
+  LockTemp(rX86_ARG3);
 }
 
 /* To be used when explicitly managing register use */
-void X86Codegen::FreeCallTemps(CompilationUnit* cu)
+void X86Mir2Lir::FreeCallTemps()
 {
-  FreeTemp(cu, rX86_ARG0);
-  FreeTemp(cu, rX86_ARG1);
-  FreeTemp(cu, rX86_ARG2);
-  FreeTemp(cu, rX86_ARG3);
+  FreeTemp(rX86_ARG0);
+  FreeTemp(rX86_ARG1);
+  FreeTemp(rX86_ARG2);
+  FreeTemp(rX86_ARG3);
 }
 
-void X86Codegen::GenMemBarrier(CompilationUnit *cu, MemBarrierKind barrier_kind)
+void X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind)
 {
 #if ANDROID_SMP != 0
   // TODO: optimize fences
-  NewLIR0(cu, kX86Mfence);
+  NewLIR0(kX86Mfence);
 #endif
 }
 /*
  * Alloc a pair of core registers, or a double.  Low reg in low byte,
  * high reg in next byte.
  */
-int X86Codegen::AllocTypedTempPair(CompilationUnit *cu, bool fp_hint,
+int X86Mir2Lir::AllocTypedTempPair(bool fp_hint,
                           int reg_class)
 {
   int high_reg;
@@ -435,136 +433,135 @@
   int res = 0;
 
   if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
-    low_reg = AllocTempDouble(cu);
+    low_reg = AllocTempDouble();
     high_reg = low_reg + 1;
     res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
     return res;
   }
 
-  low_reg = AllocTemp(cu);
-  high_reg = AllocTemp(cu);
+  low_reg = AllocTemp();
+  high_reg = AllocTemp();
   res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
   return res;
 }
 
-int X86Codegen::AllocTypedTemp(CompilationUnit *cu, bool fp_hint, int reg_class) {
+int X86Mir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
   if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
-    return AllocTempFloat(cu);
+    return AllocTempFloat();
   }
-  return AllocTemp(cu);
+  return AllocTemp();
 }
 
-void X86Codegen::CompilerInitializeRegAlloc(CompilationUnit* cu) {
+void X86Mir2Lir::CompilerInitializeRegAlloc() {
   int num_regs = sizeof(core_regs)/sizeof(*core_regs);
   int num_reserved = sizeof(ReservedRegs)/sizeof(*ReservedRegs);
   int num_temps = sizeof(core_temps)/sizeof(*core_temps);
   int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
   int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
-  RegisterPool *pool =
-      static_cast<RegisterPool*>(NewMem(cu, sizeof(*pool), true, kAllocRegAlloc));
-  cu->reg_pool = pool;
-  pool->num_core_regs = num_regs;
-  pool->core_regs =
-      static_cast<RegisterInfo*>(NewMem(cu, num_regs * sizeof(*cu->reg_pool->core_regs),
+  reg_pool_ = static_cast<RegisterPool*>(NewMem(cu_, sizeof(*reg_pool_), true, kAllocRegAlloc));
+  reg_pool_->num_core_regs = num_regs;
+  reg_pool_->core_regs =
+      static_cast<RegisterInfo*>(NewMem(cu_, num_regs * sizeof(*reg_pool_->core_regs),
                                              true, kAllocRegAlloc));
-  pool->num_fp_regs = num_fp_regs;
-  pool->FPRegs =
-      static_cast<RegisterInfo *>(NewMem(cu, num_fp_regs * sizeof(*cu->reg_pool->FPRegs),
+  reg_pool_->num_fp_regs = num_fp_regs;
+  reg_pool_->FPRegs =
+      static_cast<RegisterInfo *>(NewMem(cu_, num_fp_regs * sizeof(*reg_pool_->FPRegs),
                                               true, kAllocRegAlloc));
-  CompilerInitPool(pool->core_regs, core_regs, pool->num_core_regs);
-  CompilerInitPool(pool->FPRegs, FpRegs, pool->num_fp_regs);
+  CompilerInitPool(reg_pool_->core_regs, core_regs, reg_pool_->num_core_regs);
+  CompilerInitPool(reg_pool_->FPRegs, FpRegs, reg_pool_->num_fp_regs);
   // Keep special registers from being allocated
   for (int i = 0; i < num_reserved; i++) {
-    MarkInUse(cu, ReservedRegs[i]);
+    MarkInUse(ReservedRegs[i]);
   }
   // Mark temp regs - all others not in use can be used for promotion
   for (int i = 0; i < num_temps; i++) {
-    MarkTemp(cu, core_temps[i]);
+    MarkTemp(core_temps[i]);
   }
   for (int i = 0; i < num_fp_temps; i++) {
-    MarkTemp(cu, fp_temps[i]);
+    MarkTemp(fp_temps[i]);
   }
 }
 
-void X86Codegen::FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep,
+void X86Mir2Lir::FreeRegLocTemps(RegLocation rl_keep,
                      RegLocation rl_free)
 {
   if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
       (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) {
     // No overlap, free both
-    FreeTemp(cu, rl_free.low_reg);
-    FreeTemp(cu, rl_free.high_reg);
+    FreeTemp(rl_free.low_reg);
+    FreeTemp(rl_free.high_reg);
   }
 }
 
-void X86Codegen::SpillCoreRegs(CompilationUnit* cu) {
-  if (cu->num_core_spills == 0) {
+void X86Mir2Lir::SpillCoreRegs() {
+  if (num_core_spills_ == 0) {
     return;
   }
   // Spill mask not including fake return address register
-  uint32_t mask = cu->core_spill_mask & ~(1 << rRET);
-  int offset = cu->frame_size - (4 * cu->num_core_spills);
+  uint32_t mask = core_spill_mask_ & ~(1 << rRET);
+  int offset = frame_size_ - (4 * num_core_spills_);
   for (int reg = 0; mask; mask >>= 1, reg++) {
     if (mask & 0x1) {
-      StoreWordDisp(cu, rX86_SP, offset, reg);
+      StoreWordDisp(rX86_SP, offset, reg);
       offset += 4;
     }
   }
 }
 
-void X86Codegen::UnSpillCoreRegs(CompilationUnit* cu) {
-  if (cu->num_core_spills == 0) {
+void X86Mir2Lir::UnSpillCoreRegs() {
+  if (num_core_spills_ == 0) {
     return;
   }
   // Spill mask not including fake return address register
-  uint32_t mask = cu->core_spill_mask & ~(1 << rRET);
-  int offset = cu->frame_size - (4 * cu->num_core_spills);
+  uint32_t mask = core_spill_mask_ & ~(1 << rRET);
+  int offset = frame_size_ - (4 * num_core_spills_);
   for (int reg = 0; mask; mask >>= 1, reg++) {
     if (mask & 0x1) {
-      LoadWordDisp(cu, rX86_SP, offset, reg);
+      LoadWordDisp(rX86_SP, offset, reg);
       offset += 4;
     }
   }
 }
 
-bool X86Codegen::IsUnconditionalBranch(LIR* lir)
+bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir)
 {
   return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
 }
 
-/* Common initialization routine for an architecture family */
-bool InitX86Codegen(CompilationUnit* cu) {
-  cu->cg.reset(new X86Codegen());
+X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph) : Mir2Lir(cu, mir_graph) {
   for (int i = 0; i < kX86Last; i++) {
-    if (X86Codegen::EncodingMap[i].opcode != i) {
-      LOG(FATAL) << "Encoding order for " << X86Codegen::EncodingMap[i].name
+    if (X86Mir2Lir::EncodingMap[i].opcode != i) {
+      LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
                  << " is wrong: expecting " << i << ", seeing "
-                 << static_cast<int>(X86Codegen::EncodingMap[i].opcode);
+                 << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
     }
   }
-  return true;
+}
+
+Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph) {
+  return new X86Mir2Lir(cu, mir_graph);
 }
 
 // Not used in x86
-int X86Codegen::LoadHelper(CompilationUnit* cu, int offset)
+int X86Mir2Lir::LoadHelper(int offset)
 {
   LOG(FATAL) << "Unexpected use of LoadHelper in x86";
   return INVALID_REG;
 }
 
-uint64_t X86Codegen::GetTargetInstFlags(int opcode)
+uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode)
 {
-  return X86Codegen::EncodingMap[opcode].flags;
+  return X86Mir2Lir::EncodingMap[opcode].flags;
 }
 
-const char* X86Codegen::GetTargetInstName(int opcode)
+const char* X86Mir2Lir::GetTargetInstName(int opcode)
 {
-  return X86Codegen::EncodingMap[opcode].name;
+  return X86Mir2Lir::EncodingMap[opcode].name;
 }
 
-const char* X86Codegen::GetTargetInstFmt(int opcode)
+const char* X86Mir2Lir::GetTargetInstFmt(int opcode)
 {
-  return X86Codegen::EncodingMap[opcode].fmt;
+  return X86Mir2Lir::EncodingMap[opcode].fmt;
 }
 
 } // namespace art
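Two things above are worth calling out: construction now goes through a plain
factory (X86CodeGenerator) instead of InitX86Codegen resetting cu->cg, and the
spill/unspill loops share a compact idiom of walking a bit mask and a register
counter in lockstep, one 32-bit slot per set bit. The mask walk standalone,
with a printf standing in for StoreWordDisp:

#include <cstdint>
#include <cstdio>

// Walk a core spill mask exactly as SpillCoreRegs does: shift the mask right
// while counting register numbers, emitting one stack slot per set bit.
void ForEachSpill(uint32_t mask, int first_offset) {
  int offset = first_offset;
  for (int reg = 0; mask != 0; mask >>= 1, ++reg) {
    if (mask & 0x1) {
      std::printf("spill r%d at [sp + %d]\n", reg, offset);
      offset += 4;  // one 32-bit slot per spilled core register
    }
  }
}

int main() {
  ForEachSpill(/*mask=*/0x2A, /*first_offset=*/16);  // regs 1, 3, 5
  return 0;
}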
diff --git a/src/compiler/dex/quick/x86/utility_x86.cc b/src/compiler/dex/quick/x86/utility_x86.cc
index fa40635..45c0e9c 100644
--- a/src/compiler/dex/quick/x86/utility_x86.cc
+++ b/src/compiler/dex/quick/x86/utility_x86.cc
@@ -15,15 +15,13 @@
  */
 
 #include "codegen_x86.h"
-#include "compiler/dex/quick/codegen_util.h"
-#include "compiler/dex/quick/ralloc_util.h"
 #include "x86_lir.h"
 
 namespace art {
 
 /* This file contains codegen for the X86 ISA */
 
-LIR* X86Codegen::OpFpRegCopy(CompilationUnit *cu, int r_dest, int r_src)
+LIR* X86Mir2Lir::OpFpRegCopy(int r_dest, int r_src)
 {
   int opcode;
   /* must be both DOUBLE or both not DOUBLE */
@@ -43,29 +41,29 @@
     }
   }
   DCHECK_NE((EncodingMap[opcode].flags & IS_BINARY_OP), 0ULL);
-  LIR* res = RawLIR(cu, cu->current_dalvik_offset, opcode, r_dest, r_src);
+  LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest, r_src);
   if (r_dest == r_src) {
     res->flags.is_nop = true;
   }
   return res;
 }
 
-bool X86Codegen::InexpensiveConstantInt(int32_t value)
+bool X86Mir2Lir::InexpensiveConstantInt(int32_t value)
 {
   return true;
 }
 
-bool X86Codegen::InexpensiveConstantFloat(int32_t value)
+bool X86Mir2Lir::InexpensiveConstantFloat(int32_t value)
 {
   return false;
 }
 
-bool X86Codegen::InexpensiveConstantLong(int64_t value)
+bool X86Mir2Lir::InexpensiveConstantLong(int64_t value)
 {
   return true;
 }
 
-bool X86Codegen::InexpensiveConstantDouble(int64_t value)
+bool X86Mir2Lir::InexpensiveConstantDouble(int64_t value)
 {
   return false; // TUNING
 }
@@ -79,49 +77,49 @@
  * 1) r_dest is freshly returned from AllocTemp or
  * 2) The codegen is under fixed register usage
  */
-LIR* X86Codegen::LoadConstantNoClobber(CompilationUnit *cu, int r_dest, int value)
+LIR* X86Mir2Lir::LoadConstantNoClobber(int r_dest, int value)
 {
   int r_dest_save = r_dest;
   if (X86_FPREG(r_dest)) {
     if (value == 0) {
-      return NewLIR2(cu, kX86XorpsRR, r_dest, r_dest);
+      return NewLIR2(kX86XorpsRR, r_dest, r_dest);
     }
     DCHECK(X86_SINGLEREG(r_dest));
-    r_dest = AllocTemp(cu);
+    r_dest = AllocTemp();
   }
 
   LIR *res;
   if (value == 0) {
-    res = NewLIR2(cu, kX86Xor32RR, r_dest, r_dest);
+    res = NewLIR2(kX86Xor32RR, r_dest, r_dest);
   } else {
     // Note, there is no byte immediate form of a 32 bit immediate move.
-    res = NewLIR2(cu, kX86Mov32RI, r_dest, value);
+    res = NewLIR2(kX86Mov32RI, r_dest, value);
   }
 
   if (X86_FPREG(r_dest_save)) {
-    NewLIR2(cu, kX86MovdxrRR, r_dest_save, r_dest);
-    FreeTemp(cu, r_dest);
+    NewLIR2(kX86MovdxrRR, r_dest_save, r_dest);
+    FreeTemp(r_dest);
   }
 
   return res;
 }
 
-LIR* X86Codegen::OpUnconditionalBranch(CompilationUnit* cu, LIR* target)
+LIR* X86Mir2Lir::OpUnconditionalBranch(LIR* target)
 {
-  LIR* res = NewLIR1(cu, kX86Jmp8, 0 /* offset to be patched during assembly*/ );
+  LIR* res = NewLIR1(kX86Jmp8, 0 /* offset to be patched during assembly */);
   res->target = target;
   return res;
 }
 
-LIR* X86Codegen::OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target)
+LIR* X86Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target)
 {
-  LIR* branch = NewLIR2(cu, kX86Jcc8, 0 /* offset to be patched */,
+  LIR* branch = NewLIR2(kX86Jcc8, 0 /* offset to be patched */,
                         X86ConditionEncoding(cc));
   branch->target = target;
   return branch;
 }
 
-LIR* X86Codegen::OpReg(CompilationUnit *cu, OpKind op, int r_dest_src)
+LIR* X86Mir2Lir::OpReg(OpKind op, int r_dest_src)
 {
   X86OpCode opcode = kX86Bkpt;
   switch (op) {
@@ -131,10 +129,10 @@
     default:
       LOG(FATAL) << "Bad case in OpReg " << op;
   }
-  return NewLIR1(cu, opcode, r_dest_src);
+  return NewLIR1(opcode, r_dest_src);
 }
 
-LIR* X86Codegen::OpRegImm(CompilationUnit *cu, OpKind op, int r_dest_src1, int value)
+LIR* X86Mir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value)
 {
   X86OpCode opcode = kX86Bkpt;
   bool byte_imm = IS_SIMM8(value);
@@ -151,28 +149,28 @@
     case kOpSub: opcode = byte_imm ? kX86Sub32RI8 : kX86Sub32RI; break;
     case kOpXor: opcode = byte_imm ? kX86Xor32RI8 : kX86Xor32RI; break;
     case kOpCmp: opcode = byte_imm ? kX86Cmp32RI8 : kX86Cmp32RI; break;
-    case kOpMov: return LoadConstantNoClobber(cu, r_dest_src1, value);
+    case kOpMov: return LoadConstantNoClobber(r_dest_src1, value);
     case kOpMul:
       opcode = byte_imm ? kX86Imul32RRI8 : kX86Imul32RRI;
-      return NewLIR3(cu, opcode, r_dest_src1, r_dest_src1, value);
+      return NewLIR3(opcode, r_dest_src1, r_dest_src1, value);
     default:
       LOG(FATAL) << "Bad case in OpRegImm " << op;
   }
-  return NewLIR2(cu, opcode, r_dest_src1, value);
+  return NewLIR2(opcode, r_dest_src1, value);
 }
 
-LIR* X86Codegen::OpRegReg(CompilationUnit *cu, OpKind op, int r_dest_src1, int r_src2)
+LIR* X86Mir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2)
 {
     X86OpCode opcode = kX86Nop;
     bool src2_must_be_cx = false;
     switch (op) {
         // X86 unary opcodes
       case kOpMvn:
-        OpRegCopy(cu, r_dest_src1, r_src2);
-        return OpReg(cu, kOpNot, r_dest_src1);
+        OpRegCopy(r_dest_src1, r_src2);
+        return OpReg(kOpNot, r_dest_src1);
       case kOpNeg:
-        OpRegCopy(cu, r_dest_src1, r_src2);
-        return OpReg(cu, kOpNeg, r_dest_src1);
+        OpRegCopy(r_dest_src1, r_src2);
+        return OpReg(kOpNeg, r_dest_src1);
         // X86 binary opcodes
       case kOpSub: opcode = kX86Sub32RR; break;
       case kOpSbc: opcode = kX86Sbb32RR; break;
@@ -189,9 +187,9 @@
       case kOp2Byte:
         // Use shifts instead of a byte operand if the source can't be byte accessed.
         if (r_src2 >= 4) {
-          NewLIR2(cu, kX86Mov32RR, r_dest_src1, r_src2);
-          NewLIR2(cu, kX86Sal32RI, r_dest_src1, 24);
-          return NewLIR2(cu, kX86Sar32RI, r_dest_src1, 24);
+          NewLIR2(kX86Mov32RR, r_dest_src1, r_src2);
+          NewLIR2(kX86Sal32RI, r_dest_src1, 24);
+          return NewLIR2(kX86Sar32RI, r_dest_src1, 24);
         } else {
           opcode = kX86Movsx8RR;
         }
@@ -204,10 +202,10 @@
         break;
     }
     CHECK(!src2_must_be_cx || r_src2 == rCX);
-    return NewLIR2(cu, opcode, r_dest_src1, r_src2);
+    return NewLIR2(opcode, r_dest_src1, r_src2);
 }
 
-LIR* X86Codegen::OpRegMem(CompilationUnit *cu, OpKind op, int r_dest, int rBase,
+LIR* X86Mir2Lir::OpRegMem(OpKind op, int r_dest, int rBase,
               int offset)
 {
   X86OpCode opcode = kX86Nop;
@@ -228,43 +226,43 @@
       LOG(FATAL) << "Bad case in OpRegMem " << op;
       break;
   }
-  return NewLIR3(cu, opcode, r_dest, rBase, offset);
+  return NewLIR3(opcode, r_dest, rBase, offset);
 }
 
-LIR* X86Codegen::OpRegRegReg(CompilationUnit *cu, OpKind op, int r_dest, int r_src1,
+LIR* X86Mir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1,
                  int r_src2)
 {
   if (r_dest != r_src1 && r_dest != r_src2) {
     if (op == kOpAdd) { // lea special case, except can't encode rbp as base
       if (r_src1 == r_src2) {
-        OpRegCopy(cu, r_dest, r_src1);
-        return OpRegImm(cu, kOpLsl, r_dest, 1);
+        OpRegCopy(r_dest, r_src1);
+        return OpRegImm(kOpLsl, r_dest, 1);
       } else if (r_src1 != rBP) {
-        return NewLIR5(cu, kX86Lea32RA, r_dest, r_src1 /* base */,
+        return NewLIR5(kX86Lea32RA, r_dest, r_src1 /* base */,
                        r_src2 /* index */, 0 /* scale */, 0 /* disp */);
       } else {
-        return NewLIR5(cu, kX86Lea32RA, r_dest, r_src2 /* base */,
+        return NewLIR5(kX86Lea32RA, r_dest, r_src2 /* base */,
                        r_src1 /* index */, 0 /* scale */, 0 /* disp */);
       }
     } else {
-      OpRegCopy(cu, r_dest, r_src1);
-      return OpRegReg(cu, op, r_dest, r_src2);
+      OpRegCopy(r_dest, r_src1);
+      return OpRegReg(op, r_dest, r_src2);
     }
   } else if (r_dest == r_src1) {
-    return OpRegReg(cu, op, r_dest, r_src2);
+    return OpRegReg(op, r_dest, r_src2);
   } else {  // r_dest == r_src2
     switch (op) {
       case kOpSub:  // non-commutative
-        OpReg(cu, kOpNeg, r_dest);
+        OpReg(kOpNeg, r_dest);
         op = kOpAdd;
         break;
       case kOpSbc:
       case kOpLsl: case kOpLsr: case kOpAsr: case kOpRor: {
-        int t_reg = AllocTemp(cu);
-        OpRegCopy(cu, t_reg, r_src1);
-        OpRegReg(cu, op, t_reg, r_src2);
-        LIR* res = OpRegCopy(cu, r_dest, t_reg);
-        FreeTemp(cu, t_reg);
+        int t_reg = AllocTemp();
+        OpRegCopy(t_reg, r_src1);
+        OpRegReg(op, t_reg, r_src2);
+        LIR* res = OpRegCopy(r_dest, t_reg);
+        FreeTemp(t_reg);
         return res;
       }
       case kOpAdd:  // commutative
@@ -276,38 +274,38 @@
       default:
         LOG(FATAL) << "Bad case in OpRegRegReg " << op;
     }
-    return OpRegReg(cu, op, r_dest, r_src1);
+    return OpRegReg(op, r_dest, r_src1);
   }
 }
 
-LIR* X86Codegen::OpRegRegImm(CompilationUnit *cu, OpKind op, int r_dest, int r_src,
+LIR* X86Mir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src,
                  int value)
 {
   if (op == kOpMul) {
     X86OpCode opcode = IS_SIMM8(value) ? kX86Imul32RRI8 : kX86Imul32RRI;
-    return NewLIR3(cu, opcode, r_dest, r_src, value);
+    return NewLIR3(opcode, r_dest, r_src, value);
   } else if (op == kOpAnd) {
     if (value == 0xFF && r_src < 4) {
-      return NewLIR2(cu, kX86Movzx8RR, r_dest, r_src);
+      return NewLIR2(kX86Movzx8RR, r_dest, r_src);
     } else if (value == 0xFFFF) {
-      return NewLIR2(cu, kX86Movzx16RR, r_dest, r_src);
+      return NewLIR2(kX86Movzx16RR, r_dest, r_src);
     }
   }
   if (r_dest != r_src) {
     if (false && op == kOpLsl && value >= 0 && value <= 3) { // lea shift special case
       // TODO: fix bug in LEA encoding when disp == 0
-      return NewLIR5(cu, kX86Lea32RA, r_dest,  r5sib_no_base /* base */,
+      return NewLIR5(kX86Lea32RA, r_dest,  r5sib_no_base /* base */,
                      r_src /* index */, value /* scale */, 0 /* disp */);
     } else if (op == kOpAdd) { // lea add special case
-      return NewLIR5(cu, kX86Lea32RA, r_dest, r_src /* base */,
+      return NewLIR5(kX86Lea32RA, r_dest, r_src /* base */,
                      r4sib_no_index /* index */, 0 /* scale */, value /* disp */);
     }
-    OpRegCopy(cu, r_dest, r_src);
+    OpRegCopy(r_dest, r_src);
   }
-  return OpRegImm(cu, op, r_dest, value);
+  return OpRegImm(op, r_dest, value);
 }
 
-LIR* X86Codegen::OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset)
+LIR* X86Mir2Lir::OpThreadMem(OpKind op, int thread_offset)
 {
   X86OpCode opcode = kX86Bkpt;
   switch (op) {
@@ -316,10 +314,10 @@
       LOG(FATAL) << "Bad opcode: " << op;
       break;
   }
-  return NewLIR1(cu, opcode, thread_offset);
+  return NewLIR1(opcode, thread_offset);
 }
 
-LIR* X86Codegen::OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp)
+LIR* X86Mir2Lir::OpMem(OpKind op, int rBase, int disp)
 {
   X86OpCode opcode = kX86Bkpt;
   switch (op) {
@@ -328,10 +326,10 @@
       LOG(FATAL) << "Bad opcode: " << op;
       break;
   }
-  return NewLIR2(cu, opcode, rBase, disp);
+  return NewLIR2(opcode, rBase, disp);
 }
 
-LIR* X86Codegen::LoadConstantWide(CompilationUnit *cu, int r_dest_lo, int r_dest_hi, int64_t value)
+LIR* X86Mir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value)
 {
     int32_t val_lo = Low32Bits(value);
     int32_t val_hi = High32Bits(value);
@@ -339,27 +337,27 @@
     if (X86_FPREG(r_dest_lo)) {
       DCHECK(X86_FPREG(r_dest_hi));  // ignore r_dest_hi
       if (value == 0) {
-        return NewLIR2(cu, kX86XorpsRR, r_dest_lo, r_dest_lo);
+        return NewLIR2(kX86XorpsRR, r_dest_lo, r_dest_lo);
       } else {
         if (val_lo == 0) {
-          res = NewLIR2(cu, kX86XorpsRR, r_dest_lo, r_dest_lo);
+          res = NewLIR2(kX86XorpsRR, r_dest_lo, r_dest_lo);
         } else {
-          res = LoadConstantNoClobber(cu, r_dest_lo, val_lo);
+          res = LoadConstantNoClobber(r_dest_lo, val_lo);
         }
         if (val_hi != 0) {
-          LoadConstantNoClobber(cu, r_dest_hi, val_hi);
-          NewLIR2(cu, kX86PsllqRI, r_dest_hi, 32);
-          NewLIR2(cu, kX86OrpsRR, r_dest_lo, r_dest_hi);
+          LoadConstantNoClobber(r_dest_hi, val_hi);
+          NewLIR2(kX86PsllqRI, r_dest_hi, 32);
+          NewLIR2(kX86OrpsRR, r_dest_lo, r_dest_hi);
         }
       }
     } else {
-      res = LoadConstantNoClobber(cu, r_dest_lo, val_lo);
-      LoadConstantNoClobber(cu, r_dest_hi, val_hi);
+      res = LoadConstantNoClobber(r_dest_lo, val_lo);
+      LoadConstantNoClobber(r_dest_hi, val_hi);
     }
     return res;
 }
 
-LIR* X86Codegen::LoadBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+LIR* X86Mir2Lir::LoadBaseIndexedDisp(int rBase, int r_index, int scale,
                                      int displacement, int r_dest, int r_dest_hi, OpSize size,
                                      int s_reg) {
   LIR *load = NULL;
@@ -416,40 +414,40 @@
 
   if (!is_array) {
     if (!pair) {
-      load = NewLIR3(cu, opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
+      load = NewLIR3(opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
     } else {
       if (rBase == r_dest) {
-        load2 = NewLIR3(cu, opcode, r_dest_hi, rBase,
+        load2 = NewLIR3(opcode, r_dest_hi, rBase,
                         displacement + HIWORD_OFFSET);
-        load = NewLIR3(cu, opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
+        load = NewLIR3(opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
       } else {
-        load = NewLIR3(cu, opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
-        load2 = NewLIR3(cu, opcode, r_dest_hi, rBase,
+        load = NewLIR3(opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
+        load2 = NewLIR3(opcode, r_dest_hi, rBase,
                         displacement + HIWORD_OFFSET);
       }
     }
     if (rBase == rX86_SP) {
-      AnnotateDalvikRegAccess(cu, load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
+      AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                               true /* is_load */, is64bit);
       if (pair) {
-        AnnotateDalvikRegAccess(cu, load2, (displacement + HIWORD_OFFSET) >> 2,
+        AnnotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
                                 true /* is_load */, is64bit);
       }
     }
   } else {
     if (!pair) {
-      load = NewLIR5(cu, opcode, r_dest, rBase, r_index, scale,
+      load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
                      displacement + LOWORD_OFFSET);
     } else {
       if (rBase == r_dest) {
-        load2 = NewLIR5(cu, opcode, r_dest_hi, rBase, r_index, scale,
+        load2 = NewLIR5(opcode, r_dest_hi, rBase, r_index, scale,
                         displacement + HIWORD_OFFSET);
-        load = NewLIR5(cu, opcode, r_dest, rBase, r_index, scale,
+        load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
                        displacement + LOWORD_OFFSET);
       } else {
-        load = NewLIR5(cu, opcode, r_dest, rBase, r_index, scale,
+        load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
                        displacement + LOWORD_OFFSET);
-        load2 = NewLIR5(cu, opcode, r_dest_hi, rBase, r_index, scale,
+        load2 = NewLIR5(opcode, r_dest_hi, rBase, r_index, scale,
                         displacement + HIWORD_OFFSET);
       }
     }
@@ -459,25 +457,25 @@
 }
 
 /* Load value from base + scaled index. */
-LIR* X86Codegen::LoadBaseIndexed(CompilationUnit *cu, int rBase,
+LIR* X86Mir2Lir::LoadBaseIndexed(int rBase,
                      int r_index, int r_dest, int scale, OpSize size) {
-  return LoadBaseIndexedDisp(cu, rBase, r_index, scale, 0,
+  return LoadBaseIndexedDisp(rBase, r_index, scale, 0,
                              r_dest, INVALID_REG, size, INVALID_SREG);
 }
 
-LIR* X86Codegen::LoadBaseDisp(CompilationUnit *cu, int rBase, int displacement,
+LIR* X86Mir2Lir::LoadBaseDisp(int rBase, int displacement,
                   int r_dest, OpSize size, int s_reg) {
-  return LoadBaseIndexedDisp(cu, rBase, INVALID_REG, 0, displacement,
+  return LoadBaseIndexedDisp(rBase, INVALID_REG, 0, displacement,
                              r_dest, INVALID_REG, size, s_reg);
 }
 
-LIR* X86Codegen::LoadBaseDispWide(CompilationUnit *cu, int rBase, int displacement,
+LIR* X86Mir2Lir::LoadBaseDispWide(int rBase, int displacement,
                       int r_dest_lo, int r_dest_hi, int s_reg) {
-  return LoadBaseIndexedDisp(cu, rBase, INVALID_REG, 0, displacement,
+  return LoadBaseIndexedDisp(rBase, INVALID_REG, 0, displacement,
                              r_dest_lo, r_dest_hi, kLong, s_reg);
 }
 
-LIR* X86Codegen::StoreBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+LIR* X86Mir2Lir::StoreBaseIndexedDisp(int rBase, int r_index, int scale,
                                       int displacement, int r_src, int r_src_hi, OpSize size,
                                       int s_reg) {
   LIR *store = NULL;
@@ -529,27 +527,27 @@
 
   if (!is_array) {
     if (!pair) {
-      store = NewLIR3(cu, opcode, rBase, displacement + LOWORD_OFFSET, r_src);
+      store = NewLIR3(opcode, rBase, displacement + LOWORD_OFFSET, r_src);
     } else {
-      store = NewLIR3(cu, opcode, rBase, displacement + LOWORD_OFFSET, r_src);
-      store2 = NewLIR3(cu, opcode, rBase, displacement + HIWORD_OFFSET, r_src_hi);
+      store = NewLIR3(opcode, rBase, displacement + LOWORD_OFFSET, r_src);
+      store2 = NewLIR3(opcode, rBase, displacement + HIWORD_OFFSET, r_src_hi);
     }
     if (rBase == rX86_SP) {
-      AnnotateDalvikRegAccess(cu, store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
+      AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                               false /* is_load */, is64bit);
       if (pair) {
-        AnnotateDalvikRegAccess(cu, store2, (displacement + HIWORD_OFFSET) >> 2,
+        AnnotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
                                 false /* is_load */, is64bit);
       }
     }
   } else {
     if (!pair) {
-      store = NewLIR5(cu, opcode, rBase, r_index, scale,
+      store = NewLIR5(opcode, rBase, r_index, scale,
                       displacement + LOWORD_OFFSET, r_src);
     } else {
-      store = NewLIR5(cu, opcode, rBase, r_index, scale,
+      store = NewLIR5(opcode, rBase, r_index, scale,
                       displacement + LOWORD_OFFSET, r_src);
-      store2 = NewLIR5(cu, opcode, rBase, r_index, scale,
+      store2 = NewLIR5(opcode, rBase, r_index, scale,
                        displacement + HIWORD_OFFSET, r_src_hi);
     }
   }
@@ -558,25 +556,25 @@
 }
 
 /* Store value at base + scaled index. */
-LIR* X86Codegen::StoreBaseIndexed(CompilationUnit *cu, int rBase, int r_index, int r_src,
+LIR* X86Mir2Lir::StoreBaseIndexed(int rBase, int r_index, int r_src,
                       int scale, OpSize size)
 {
-  return StoreBaseIndexedDisp(cu, rBase, r_index, scale, 0,
+  return StoreBaseIndexedDisp(rBase, r_index, scale, 0,
                               r_src, INVALID_REG, size, INVALID_SREG);
 }
 
-LIR* X86Codegen::StoreBaseDisp(CompilationUnit *cu, int rBase, int displacement,
+LIR* X86Mir2Lir::StoreBaseDisp(int rBase, int displacement,
                                int r_src, OpSize size)
 {
-    return StoreBaseIndexedDisp(cu, rBase, INVALID_REG, 0,
+    return StoreBaseIndexedDisp(rBase, INVALID_REG, 0,
                                 displacement, r_src, INVALID_REG, size,
                                 INVALID_SREG);
 }
 
-LIR* X86Codegen::StoreBaseDispWide(CompilationUnit *cu, int rBase, int displacement,
+LIR* X86Mir2Lir::StoreBaseDispWide(int rBase, int displacement,
                                    int r_src_lo, int r_src_hi)
 {
-  return StoreBaseIndexedDisp(cu, rBase, INVALID_REG, 0, displacement,
+  return StoreBaseIndexedDisp(rBase, INVALID_REG, 0, displacement,
                               r_src_lo, r_src_hi, kLong, INVALID_SREG);
 }
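One subtlety carried through the LoadBaseIndexedDisp rewrite above: when the
base register is also the low destination of a 64-bit load, the high word is
loaded first, since loading the low word first would clobber the base before
the second access. In isolation, with plain arrays standing in for registers
and memory:

#include <cassert>
#include <cstdint>

// Simulated two-word load illustrating the ordering hazard; register ids,
// addresses, and values are invented.
void LoadPair(uint32_t regs[], int r_base, int r_lo, int r_hi,
              const uint32_t mem[]) {
  if (r_base == r_lo) {
    regs[r_hi] = mem[regs[r_base] + 1];  // high first: base still intact
    regs[r_lo] = mem[regs[r_base] + 0];  // low last clobbers the base
  } else {
    regs[r_lo] = mem[regs[r_base] + 0];
    regs[r_hi] = mem[regs[r_base] + 1];
  }
}

int main() {
  uint32_t mem[4] = {0, 0, 0xdead, 0xbeef};
  uint32_t regs[8] = {0};
  regs[2] = 2;  // base register holds address 2; it is also the low dest
  LoadPair(regs, /*r_base=*/2, /*r_lo=*/2, /*r_hi=*/3, mem);
  assert(regs[2] == 0xdead && regs[3] == 0xbeef);
  return 0;
}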
 
diff --git a/src/compiler/dex/ssa_transformation.cc b/src/compiler/dex/ssa_transformation.cc
index b13cf05..d8341e3 100644
--- a/src/compiler/dex/ssa_transformation.cc
+++ b/src/compiler/dex/ssa_transformation.cc
@@ -17,6 +17,8 @@
 #include "compiler_internals.h"
 #include "dataflow_iterator.h"
 
+#define NOTVISITED (-1)
+
 namespace art {
 
 BasicBlock* MIRGraph::NeedsVisit(BasicBlock* bb) {
@@ -179,7 +181,7 @@
 
   /* hacky loop detection */
   if (bb->taken && IsBitSet(bb->dominators, bb->taken->id)) {
-    cu_->attributes |= METHOD_HAS_LOOP;
+    attributes_ |= METHOD_HAS_LOOP;
   }
 }
 
@@ -352,9 +354,6 @@
     DCHECK_NE(idom_dfs_idx, NOTVISITED);
     int i_dom_idx = dfs_post_order_.elem_list[idom_dfs_idx];
     BasicBlock* i_dom = GetBasicBlock(i_dom_idx);
-    if (cu_->enable_debug & (1 << kDebugVerifyDataflow)) {
-      DCHECK_EQ(bb->i_dom->id, i_dom->id);
-    }
     bb->i_dom = i_dom;
     /* Add bb to the i_dominated set of the immediate dominator block */
     SetBit(cu_, i_dom->i_dominated, bb->id);
@@ -674,45 +673,43 @@
   /* Compute the DFS order */
   ComputeDFSOrders();
 
-  if (!cu_->disable_dataflow) {
-    /* Compute the dominator info */
-    ComputeDominators();
-  }
+  /* Compute the dominator info */
+  ComputeDominators();
 
   /* Allocate data structures in preparation for SSA conversion */
   CompilerInitializeSSAConversion();
 
-  if (!cu_->disable_dataflow) {
-    /* Find out the "Dalvik reg def x block" relation */
-    ComputeDefBlockMatrix();
+  /* Find out the "Dalvik reg def x block" relation */
+  ComputeDefBlockMatrix();
 
-    /* Insert phi nodes to dominance frontiers for all variables */
-    InsertPhiNodes();
-  }
+  /* Insert phi nodes to dominance frontiers for all variables */
+  InsertPhiNodes();
 
   /* Rename register names by local defs and phi nodes */
-  AllNodesIterator iter(this, false /* not iterative */);
-  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+  AllNodesIterator iter1(this, false /* not iterative */);
+  for (BasicBlock* bb = iter1.Next(); bb != NULL; bb = iter1.Next()) {
     ClearVisitedFlag(bb);
   }
   DoDFSPreOrderSSARename(GetEntryBlock());
 
-  if (!cu_->disable_dataflow) {
-    /*
-     * Shared temp bit vector used by each block to count the number of defs
-     * from all the predecessor blocks.
-     */
-    temp_ssa_register_v_ = AllocBitVector(cu_, GetNumSSARegs(), false, kBitMapTempSSARegisterV);
+  /*
+   * Shared temp bit vector used by each block to count the number of defs
+   * from all the predecessor blocks.
+   */
+  temp_ssa_register_v_ = AllocBitVector(cu_, GetNumSSARegs(), false, kBitMapTempSSARegisterV);
 
-    /* Insert phi-operands with latest SSA names from predecessor blocks */
-    ReachableNodesIterator iter(this, false /* not iterative */);
-    for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
-      InsertPhiNodeOperands(bb);
-    }
+  /* Insert phi-operands with latest SSA names from predecessor blocks */
+  ReachableNodesIterator iter2(this, false /* not iterative */);
+  for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
+    InsertPhiNodeOperands(bb);
   }
+
   if (cu_->enable_debug & (1 << kDebugDumpCFG)) {
     DumpCFG("/sdcard/3_post_ssa_cfg/", false);
   }
+  if (cu_->enable_debug & (1 << kDebugVerifyDataflow)) {
+    VerifyDataflow();
+  }
 }
 
 }  // namespace art
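With the disable_dataflow escape hatch removed, SSA construction always runs
the same fixed sequence. Merging the formerly guarded blocks into one scope is
also why the iterators pick up distinct names (iter1, iter2), and the old
inline dominator DCHECK gives way to an explicit VerifyDataflow pass behind a
debug bit. The pipeline as an outline (calls shown as comments because the
real receivers live on MIRGraph):

void SsaPipelineSketch() {
  // 1. ComputeDFSOrders()                 -- depth-first block numbering
  // 2. ComputeDominators()                -- dominator tree + frontiers
  // 3. CompilerInitializeSSAConversion()  -- allocate SSA structures
  // 4. ComputeDefBlockMatrix()            -- vreg -> defining blocks
  // 5. InsertPhiNodes()                   -- phis at dominance frontiers
  // 6. DoDFSPreOrderSSARename(entry)      -- rename defs and uses
  // 7. InsertPhiNodeOperands(bb)          -- for each reachable block
  // 8. DumpCFG / VerifyDataflow           -- only behind enable_debug bits
}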
diff --git a/src/compiler/dex/vreg_analysis.cc b/src/compiler/dex/vreg_analysis.cc
index 08f1f35..36daaea 100644
--- a/src/compiler/dex/vreg_analysis.cc
+++ b/src/compiler/dex/vreg_analysis.cc
@@ -16,53 +16,52 @@
 
 #include "compiler_internals.h"
 #include "dataflow_iterator.h"
-#include "quick/ralloc_util.h"
 
 namespace art {
 
-static bool SetFp(CompilationUnit* cu, int index, bool is_fp) {
+bool MIRGraph::SetFp(int index, bool is_fp) {
   bool change = false;
-  if (is_fp && !cu->reg_location[index].fp) {
-    cu->reg_location[index].fp = true;
-    cu->reg_location[index].defined = true;
+  if (is_fp && !reg_location_[index].fp) {
+    reg_location_[index].fp = true;
+    reg_location_[index].defined = true;
     change = true;
   }
   return change;
 }
 
-static bool SetCore(CompilationUnit* cu, int index, bool is_core) {
+bool MIRGraph::SetCore(int index, bool is_core) {
   bool change = false;
-  if (is_core && !cu->reg_location[index].defined) {
-    cu->reg_location[index].core = true;
-    cu->reg_location[index].defined = true;
+  if (is_core && !reg_location_[index].defined) {
+    reg_location_[index].core = true;
+    reg_location_[index].defined = true;
     change = true;
   }
   return change;
 }
 
-static bool SetRef(CompilationUnit* cu, int index, bool is_ref) {
+bool MIRGraph::SetRef(int index, bool is_ref) {
   bool change = false;
-  if (is_ref && !cu->reg_location[index].defined) {
-    cu->reg_location[index].ref = true;
-    cu->reg_location[index].defined = true;
+  if (is_ref && !reg_location_[index].defined) {
+    reg_location_[index].ref = true;
+    reg_location_[index].defined = true;
     change = true;
   }
   return change;
 }
 
-static bool SetWide(CompilationUnit* cu, int index, bool is_wide) {
+bool MIRGraph::SetWide(int index, bool is_wide) {
   bool change = false;
-  if (is_wide && !cu->reg_location[index].wide) {
-    cu->reg_location[index].wide = true;
+  if (is_wide && !reg_location_[index].wide) {
+    reg_location_[index].wide = true;
     change = true;
   }
   return change;
 }
 
-static bool SetHigh(CompilationUnit* cu, int index, bool is_high) {
+bool MIRGraph::SetHigh(int index, bool is_high) {
   bool change = false;
-  if (is_high && !cu->reg_location[index].high_word) {
-    cu->reg_location[index].high_word = true;
+  if (is_high && !reg_location_[index].high_word) {
+    reg_location_[index].high_word = true;
     change = true;
   }
   return change;
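These Set* predicates together implement a monotone type-inference lattice:
each attribute only ever moves from unset to set, and the returned change bit
drives the surrounding dataflow iteration to a fixed point. A sketch of that
discipline (invented Loc type, single attribute):

#include <cstdio>

// Attributes only go false -> true, so iteration terminates once a full
// pass over all locations changes nothing.
struct Loc { bool core = false, defined = false; };

bool SetCoreSketch(Loc& l) {
  if (!l.defined) { l.core = l.defined = true; return true; }
  return false;
}

int main() {
  Loc regs[3];
  bool changed = true;
  int passes = 0;
  while (changed) {  // mirrors the surrounding dataflow iteration
    changed = false;
    for (Loc& l : regs) changed |= SetCoreSketch(l);
    ++passes;
  }
  std::printf("settled after %d passes\n", passes);  // 2: one changing, one clean
  return 0;
}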
@@ -85,20 +84,20 @@
   for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
     SSARepresentation *ssa_rep = mir->ssa_rep;
     if (ssa_rep) {
-      int attrs = oat_data_flow_attributes[mir->dalvikInsn.opcode];
+      int attrs = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
 
       // Handle defs
       if (attrs & DF_DA) {
         if (attrs & DF_CORE_A) {
-          changed |= SetCore(cu_, ssa_rep->defs[0], true);
+          changed |= SetCore(ssa_rep->defs[0], true);
         }
         if (attrs & DF_REF_A) {
-          changed |= SetRef(cu_, ssa_rep->defs[0], true);
+          changed |= SetRef(ssa_rep->defs[0], true);
         }
         if (attrs & DF_A_WIDE) {
-          cu_->reg_location[ssa_rep->defs[0]].wide = true;
-          cu_->reg_location[ssa_rep->defs[1]].wide = true;
-          cu_->reg_location[ssa_rep->defs[1]].high_word = true;
+          reg_location_[ssa_rep->defs[0]].wide = true;
+          reg_location_[ssa_rep->defs[1]].wide = true;
+          reg_location_[ssa_rep->defs[1]].high_word = true;
           DCHECK_EQ(SRegToVReg(ssa_rep->defs[0])+1,
           SRegToVReg(ssa_rep->defs[1]));
         }
@@ -108,15 +107,15 @@
       int next = 0;
       if (attrs & DF_UA) {
         if (attrs & DF_CORE_A) {
-          changed |= SetCore(cu_, ssa_rep->uses[next], true);
+          changed |= SetCore(ssa_rep->uses[next], true);
         }
         if (attrs & DF_REF_A) {
-          changed |= SetRef(cu_, ssa_rep->uses[next], true);
+          changed |= SetRef(ssa_rep->uses[next], true);
         }
         if (attrs & DF_A_WIDE) {
-          cu_->reg_location[ssa_rep->uses[next]].wide = true;
-          cu_->reg_location[ssa_rep->uses[next + 1]].wide = true;
-          cu_->reg_location[ssa_rep->uses[next + 1]].high_word = true;
+          reg_location_[ssa_rep->uses[next]].wide = true;
+          reg_location_[ssa_rep->uses[next + 1]].wide = true;
+          reg_location_[ssa_rep->uses[next + 1]].high_word = true;
           DCHECK_EQ(SRegToVReg(ssa_rep->uses[next])+1,
           SRegToVReg(ssa_rep->uses[next + 1]));
           next += 2;
@@ -126,15 +125,15 @@
       }
       if (attrs & DF_UB) {
         if (attrs & DF_CORE_B) {
-          changed |= SetCore(cu_, ssa_rep->uses[next], true);
+          changed |= SetCore(ssa_rep->uses[next], true);
         }
         if (attrs & DF_REF_B) {
-          changed |= SetRef(cu_, ssa_rep->uses[next], true);
+          changed |= SetRef(ssa_rep->uses[next], true);
         }
         if (attrs & DF_B_WIDE) {
-          cu_->reg_location[ssa_rep->uses[next]].wide = true;
-          cu_->reg_location[ssa_rep->uses[next + 1]].wide = true;
-          cu_->reg_location[ssa_rep->uses[next + 1]].high_word = true;
+          reg_location_[ssa_rep->uses[next]].wide = true;
+          reg_location_[ssa_rep->uses[next + 1]].wide = true;
+          reg_location_[ssa_rep->uses[next + 1]].high_word = true;
           DCHECK_EQ(SRegToVReg(ssa_rep->uses[next])+1,
                                SRegToVReg(ssa_rep->uses[next + 1]));
           next += 2;
@@ -144,15 +143,15 @@
       }
       if (attrs & DF_UC) {
         if (attrs & DF_CORE_C) {
-          changed |= SetCore(cu_, ssa_rep->uses[next], true);
+          changed |= SetCore(ssa_rep->uses[next], true);
         }
         if (attrs & DF_REF_C) {
-          changed |= SetRef(cu_, ssa_rep->uses[next], true);
+          changed |= SetRef(ssa_rep->uses[next], true);
         }
         if (attrs & DF_C_WIDE) {
-          cu_->reg_location[ssa_rep->uses[next]].wide = true;
-          cu_->reg_location[ssa_rep->uses[next + 1]].wide = true;
-          cu_->reg_location[ssa_rep->uses[next + 1]].high_word = true;
+          reg_location_[ssa_rep->uses[next]].wide = true;
+          reg_location_[ssa_rep->uses[next + 1]].wide = true;
+          reg_location_[ssa_rep->uses[next + 1]].high_word = true;
           DCHECK_EQ(SRegToVReg(ssa_rep->uses[next])+1,
           SRegToVReg(ssa_rep->uses[next + 1]));
         }
@@ -164,27 +163,27 @@
           (mir->dalvikInsn.opcode == Instruction::RETURN_OBJECT)) {
         switch(cu_->shorty[0]) {
             case 'I':
-              changed |= SetCore(cu_, ssa_rep->uses[0], true);
+              changed |= SetCore(ssa_rep->uses[0], true);
               break;
             case 'J':
-              changed |= SetCore(cu_, ssa_rep->uses[0], true);
-              changed |= SetCore(cu_, ssa_rep->uses[1], true);
-              cu_->reg_location[ssa_rep->uses[0]].wide = true;
-              cu_->reg_location[ssa_rep->uses[1]].wide = true;
-              cu_->reg_location[ssa_rep->uses[1]].high_word = true;
+              changed |= SetCore(ssa_rep->uses[0], true);
+              changed |= SetCore(ssa_rep->uses[1], true);
+              reg_location_[ssa_rep->uses[0]].wide = true;
+              reg_location_[ssa_rep->uses[1]].wide = true;
+              reg_location_[ssa_rep->uses[1]].high_word = true;
               break;
             case 'F':
-              changed |= SetFp(cu_, ssa_rep->uses[0], true);
+              changed |= SetFp(ssa_rep->uses[0], true);
               break;
             case 'D':
-              changed |= SetFp(cu_, ssa_rep->uses[0], true);
-              changed |= SetFp(cu_, ssa_rep->uses[1], true);
-              cu_->reg_location[ssa_rep->uses[0]].wide = true;
-              cu_->reg_location[ssa_rep->uses[1]].wide = true;
-              cu_->reg_location[ssa_rep->uses[1]].high_word = true;
+              changed |= SetFp(ssa_rep->uses[0], true);
+              changed |= SetFp(ssa_rep->uses[1], true);
+              reg_location_[ssa_rep->uses[0]].wide = true;
+              reg_location_[ssa_rep->uses[1]].wide = true;
+              reg_location_[ssa_rep->uses[1]].high_word = true;
               break;
             case 'L':
-              changed |= SetRef(cu_, ssa_rep->uses[0], true);
+              changed |= SetRef(ssa_rep->uses[0], true);
               break;
             default: break;
         }
@@ -198,7 +197,7 @@
           (attrs & (DF_FORMAT_35C | DF_FORMAT_3RC))) {
         DCHECK_EQ(next, 0);
         int target_idx = mir->dalvikInsn.vB;
-        const char* shorty = GetShortyFromTargetIdx(cu_, target_idx);
+        const char* shorty = GetShortyFromTargetIdx(target_idx);
         // Handle result type if floating point
         if ((shorty[0] == 'F') || (shorty[0] == 'D')) {
           MIR* move_result_mir = FindMoveResult(bb, mir);
@@ -208,10 +207,10 @@
             SSARepresentation* tgt_rep = move_result_mir->ssa_rep;
             DCHECK(tgt_rep != NULL);
             tgt_rep->fp_def[0] = true;
-            changed |= SetFp(cu_, tgt_rep->defs[0], true);
+            changed |= SetFp(tgt_rep->defs[0], true);
             if (shorty[0] == 'D') {
               tgt_rep->fp_def[1] = true;
-              changed |= SetFp(cu_, tgt_rep->defs[1], true);
+              changed |= SetFp(tgt_rep->defs[1], true);
             }
           }
         }
@@ -219,8 +218,8 @@
         // If this is a non-static invoke, mark implicit "this"
         if (((mir->dalvikInsn.opcode != Instruction::INVOKE_STATIC) &&
             (mir->dalvikInsn.opcode != Instruction::INVOKE_STATIC_RANGE))) {
-          cu_->reg_location[ssa_rep->uses[next]].defined = true;
-          cu_->reg_location[ssa_rep->uses[next]].ref = true;
+          reg_location_[ssa_rep->uses[next]].defined = true;
+          reg_location_[ssa_rep->uses[next]].ref = true;
           next++;
         }
         uint32_t cpos = 1;
@@ -231,28 +230,28 @@
               case 'D':
                 ssa_rep->fp_use[i] = true;
                 ssa_rep->fp_use[i+1] = true;
-                cu_->reg_location[ssa_rep->uses[i]].wide = true;
-                cu_->reg_location[ssa_rep->uses[i+1]].wide = true;
-                cu_->reg_location[ssa_rep->uses[i+1]].high_word = true;
+                reg_location_[ssa_rep->uses[i]].wide = true;
+                reg_location_[ssa_rep->uses[i+1]].wide = true;
+                reg_location_[ssa_rep->uses[i+1]].high_word = true;
                 DCHECK_EQ(SRegToVReg(ssa_rep->uses[i])+1, SRegToVReg(ssa_rep->uses[i+1]));
                 i++;
                 break;
               case 'J':
-                cu_->reg_location[ssa_rep->uses[i]].wide = true;
-                cu_->reg_location[ssa_rep->uses[i+1]].wide = true;
-                cu_->reg_location[ssa_rep->uses[i+1]].high_word = true;
+                reg_location_[ssa_rep->uses[i]].wide = true;
+                reg_location_[ssa_rep->uses[i+1]].wide = true;
+                reg_location_[ssa_rep->uses[i+1]].high_word = true;
                 DCHECK_EQ(SRegToVReg(ssa_rep->uses[i])+1, SRegToVReg(ssa_rep->uses[i+1]));
-                changed |= SetCore(cu_, ssa_rep->uses[i],true);
+                changed |= SetCore(ssa_rep->uses[i], true);
                 i++;
                 break;
               case 'F':
                 ssa_rep->fp_use[i] = true;
                 break;
               case 'L':
-                changed |= SetRef(cu_,ssa_rep->uses[i], true);
+                changed |= SetRef(ssa_rep->uses[i], true);
                 break;
               default:
-                changed |= SetCore(cu_,ssa_rep->uses[i], true);
+                changed |= SetCore(ssa_rep->uses[i], true);
                 break;
             }
             i++;
@@ -262,11 +261,11 @@
 
       for (int i=0; ssa_rep->fp_use && i< ssa_rep->num_uses; i++) {
         if (ssa_rep->fp_use[i])
-          changed |= SetFp(cu_, ssa_rep->uses[i], true);
+          changed |= SetFp(ssa_rep->uses[i], true);
         }
       for (int i=0; ssa_rep->fp_def && i< ssa_rep->num_defs; i++) {
         if (ssa_rep->fp_def[i])
-          changed |= SetFp(cu_, ssa_rep->defs[i], true);
+          changed |= SetFp(ssa_rep->defs[i], true);
         }
       // Special-case handling for moves & Phi
       if (attrs & (DF_IS_MOVE | DF_NULL_TRANSFER_N)) {
@@ -278,14 +277,14 @@
          */
         bool is_phi = (static_cast<int>(mir->dalvikInsn.opcode) ==
                       kMirOpPhi);
-        RegLocation rl_temp = cu_->reg_location[ssa_rep->defs[0]];
+        RegLocation rl_temp = reg_location_[ssa_rep->defs[0]];
         bool defined_fp = rl_temp.defined && rl_temp.fp;
         bool defined_core = rl_temp.defined && rl_temp.core;
         bool defined_ref = rl_temp.defined && rl_temp.ref;
         bool is_wide = rl_temp.wide || ((attrs & DF_A_WIDE) != 0);
         bool is_high = is_phi && rl_temp.wide && rl_temp.high_word;
         for (int i = 0; i < ssa_rep->num_uses;i++) {
-          rl_temp = cu_->reg_location[ssa_rep->uses[i]];
+          rl_temp = reg_location_[ssa_rep->uses[i]];
           defined_fp |= rl_temp.defined && rl_temp.fp;
           defined_core |= rl_temp.defined && rl_temp.core;
           defined_ref |= rl_temp.defined && rl_temp.ref;
@@ -313,26 +312,26 @@
                        << " has both fp and core/ref uses for same def.";
           cu_->disable_opt |= (1 << kPromoteRegs);
         }
-        changed |= SetFp(cu_, ssa_rep->defs[0], defined_fp);
-        changed |= SetCore(cu_, ssa_rep->defs[0], defined_core);
-        changed |= SetRef(cu_, ssa_rep->defs[0], defined_ref);
-        changed |= SetWide(cu_, ssa_rep->defs[0], is_wide);
-        changed |= SetHigh(cu_, ssa_rep->defs[0], is_high);
+        changed |= SetFp(ssa_rep->defs[0], defined_fp);
+        changed |= SetCore(ssa_rep->defs[0], defined_core);
+        changed |= SetRef(ssa_rep->defs[0], defined_ref);
+        changed |= SetWide(ssa_rep->defs[0], is_wide);
+        changed |= SetHigh(ssa_rep->defs[0], is_high);
         if (attrs & DF_A_WIDE) {
-          changed |= SetWide(cu_, ssa_rep->defs[1], true);
-          changed |= SetHigh(cu_, ssa_rep->defs[1], true);
+          changed |= SetWide(ssa_rep->defs[1], true);
+          changed |= SetHigh(ssa_rep->defs[1], true);
         }
         for (int i = 0; i < ssa_rep->num_uses; i++) {
-          changed |= SetFp(cu_, ssa_rep->uses[i], defined_fp);
-          changed |= SetCore(cu_, ssa_rep->uses[i], defined_core);
-          changed |= SetRef(cu_, ssa_rep->uses[i], defined_ref);
-          changed |= SetWide(cu_, ssa_rep->uses[i], is_wide);
-          changed |= SetHigh(cu_, ssa_rep->uses[i], is_high);
+          changed |= SetFp(ssa_rep->uses[i], defined_fp);
+          changed |= SetCore(ssa_rep->uses[i], defined_core);
+          changed |= SetRef(ssa_rep->uses[i], defined_ref);
+          changed |= SetWide(ssa_rep->uses[i], is_wide);
+          changed |= SetHigh(ssa_rep->uses[i], is_high);
         }
         if (attrs & DF_A_WIDE) {
           DCHECK_EQ(ssa_rep->num_uses, 2);
-          changed |= SetWide(cu_, ssa_rep->uses[1], true);
-          changed |= SetHigh(cu_, ssa_rep->uses[1], true);
+          changed |= SetWide(ssa_rep->uses[1], true);
+          changed |= SetHigh(ssa_rep->uses[1], true);
         }
       }
     }
@@ -344,7 +343,8 @@
 
 void MIRGraph::DumpRegLocTable(RegLocation* table, int count)
 {
-  Codegen* cg = cu_->cg.get();
+  // FIXME: Quick-specific. Move to Quick (and make a generic version for MIRGraph?)
+  Mir2Lir* cg = static_cast<Mir2Lir*>(cu_->cg.get());
   if (cg != NULL) {
     for (int i = 0; i < count; i++) {
       LOG(INFO) << StringPrintf("Loc[%02d] : %s, %c %c %c %c %c %c %c%d %c%d S%d",
@@ -376,17 +376,6 @@
                                      INVALID_REG, INVALID_REG, INVALID_SREG,
                                      INVALID_SREG};
 
-int MIRGraph::ComputeFrameSize() {
-  /* Figure out the frame size */
-  static const uint32_t kAlignMask = kStackAlignment - 1;
-  uint32_t size = (cu_->num_core_spills + cu_->num_fp_spills +
-                   1 /* filler word */ + cu_->num_regs + cu_->num_outs +
-                   cu_->num_compiler_temps + 1 /* cur_method* */)
-                   * sizeof(uint32_t);
-  /* Align and set */
-  return (size + kAlignMask) & ~(kAlignMask);
-}
-
 /*
  * Simple register allocation.  Some Dalvik virtual registers may
  * be promoted to physical registers.  Most of the work for temp
@@ -408,21 +397,17 @@
   }
 
   /* Patch up the locations for Method* and the compiler temps */
-  loc[cu_->method_sreg].location = kLocCompilerTemp;
-  loc[cu_->method_sreg].defined = true;
+  loc[method_sreg_].location = kLocCompilerTemp;
+  loc[method_sreg_].defined = true;
   for (i = 0; i < cu_->num_compiler_temps; i++) {
-    CompilerTemp* ct = reinterpret_cast<CompilerTemp*>(cu_->compiler_temps.elem_list[i]);
+    CompilerTemp* ct = reinterpret_cast<CompilerTemp*>(compiler_temps_.elem_list[i]);
     loc[ct->s_reg].location = kLocCompilerTemp;
     loc[ct->s_reg].defined = true;
   }
 
-  cu_->reg_location = loc;
+  reg_location_ = loc;
 
-  /* Allocation the promotion map */
   int num_regs = cu_->num_dalvik_registers;
-  cu_->promotion_map = static_cast<PromotionMap*>
-      (NewMem(cu_, (num_regs + cu_->num_compiler_temps + 1) * sizeof(cu_->promotion_map[0]),
-              true, kAllocRegAlloc));
 
   /* Add types of incoming arguments based on signature */
   int num_ins = cu_->num_ins;
@@ -430,8 +415,8 @@
     int s_reg = num_regs - num_ins;
     if ((cu_->access_flags & kAccStatic) == 0) {
       // For non-static, skip past "this"
-      cu_->reg_location[s_reg].defined = true;
-      cu_->reg_location[s_reg].ref = true;
+      reg_location_[s_reg].defined = true;
+      reg_location_[s_reg].ref = true;
       s_reg++;
     }
     const char* shorty = cu_->shorty;
@@ -439,33 +424,33 @@
     for (int i = 1; i < shorty_len; i++) {
       switch (shorty[i]) {
         case 'D':
-          cu_->reg_location[s_reg].wide = true;
-          cu_->reg_location[s_reg+1].high_word = true;
-          cu_->reg_location[s_reg+1].fp = true;
+          reg_location_[s_reg].wide = true;
+          reg_location_[s_reg+1].high_word = true;
+          reg_location_[s_reg+1].fp = true;
           DCHECK_EQ(SRegToVReg(s_reg)+1, SRegToVReg(s_reg+1));
-          cu_->reg_location[s_reg].fp = true;
-          cu_->reg_location[s_reg].defined = true;
+          reg_location_[s_reg].fp = true;
+          reg_location_[s_reg].defined = true;
           s_reg++;
           break;
         case 'J':
-          cu_->reg_location[s_reg].wide = true;
-          cu_->reg_location[s_reg+1].high_word = true;
+          reg_location_[s_reg].wide = true;
+          reg_location_[s_reg+1].high_word = true;
           DCHECK_EQ(SRegToVReg(s_reg)+1, SRegToVReg(s_reg+1));
-          cu_->reg_location[s_reg].core = true;
-          cu_->reg_location[s_reg].defined = true;
+          reg_location_[s_reg].core = true;
+          reg_location_[s_reg].defined = true;
           s_reg++;
           break;
         case 'F':
-          cu_->reg_location[s_reg].fp = true;
-          cu_->reg_location[s_reg].defined = true;
+          reg_location_[s_reg].fp = true;
+          reg_location_[s_reg].defined = true;
           break;
         case 'L':
-          cu_->reg_location[s_reg].ref = true;
-          cu_->reg_location[s_reg].defined = true;
+          reg_location_[s_reg].ref = true;
+          reg_location_[s_reg].defined = true;
           break;
         default:
-          cu_->reg_location[s_reg].core = true;
-          cu_->reg_location[s_reg].defined = true;
+          reg_location_[s_reg].core = true;
+          reg_location_[s_reg].defined = true;
           break;
         }
         s_reg++;
@@ -485,10 +470,10 @@
    * allocator, remove this remapping.
    */
   for (i=0; i < GetNumSSARegs(); i++) {
-    if (cu_->reg_location[i].location != kLocCompilerTemp) {
-      int orig_sreg = cu_->reg_location[i].s_reg_low;
-      cu_->reg_location[i].orig_sreg = orig_sreg;
-      cu_->reg_location[i].s_reg_low = SRegToVReg(orig_sreg);
+    if (reg_location_[i].location != kLocCompilerTemp) {
+      int orig_sreg = reg_location_[i].s_reg_low;
+      reg_location_[i].orig_sreg = orig_sreg;
+      reg_location_[i].s_reg_low = SRegToVReg(orig_sreg);
     }
   }
 }
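Note on the hunks above: the shorty-driven argument typing now writes through MIRGraph's reg_location_ member rather than cu_->reg_location. A minimal standalone sketch of that seeding logic, using simplified stand-in types (RegLocationLite and SeedArgTypes are illustrative names, not ART's):

    // Illustrative sketch only: simplified stand-ins for ART's RegLocation
    // seeding from a method shorty. Neither RegLocationLite nor SeedArgTypes
    // exists in the tree.
    #include <cstring>

    struct RegLocationLite {
      bool defined, wide, high_word, fp, core, ref;
    };

    // Seeds type flags for incoming arguments, mirroring the switch in the
    // hunk above; "this" (when present) is a defined reference.
    void SeedArgTypes(RegLocationLite* loc, int s_reg, const char* shorty,
                      bool is_static) {
      if (!is_static) {
        loc[s_reg].defined = true;
        loc[s_reg].ref = true;
        s_reg++;
      }
      for (size_t i = 1; i < strlen(shorty); i++) {
        switch (shorty[i]) {
          case 'D':  // double: wide fp pair, high word marked on the partner
            loc[s_reg].wide = true;
            loc[s_reg + 1].high_word = true;
            loc[s_reg + 1].fp = true;
            loc[s_reg].fp = true;
            loc[s_reg].defined = true;
            s_reg++;
            break;
          case 'J':  // long: wide core pair
            loc[s_reg].wide = true;
            loc[s_reg + 1].high_word = true;
            loc[s_reg].core = true;
            loc[s_reg].defined = true;
            s_reg++;
            break;
          case 'F':  // float: single-precision fp
            loc[s_reg].fp = true;
            loc[s_reg].defined = true;
            break;
          case 'L':  // object reference
            loc[s_reg].ref = true;
            loc[s_reg].defined = true;
            break;
          default:   // remaining shorty codes are single-word core values
            loc[s_reg].core = true;
            loc[s_reg].defined = true;
            break;
        }
        s_reg++;
      }
    }

The real method additionally patches the Method* slot and the compiler temps to kLocCompilerTemp, as the surrounding hunk shows.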
diff --git a/src/compiler/driver/compiler_driver.h b/src/compiler/driver/compiler_driver.h
index f398e31..af04cbf 100644
--- a/src/compiler/driver/compiler_driver.h
+++ b/src/compiler/driver/compiler_driver.h
@@ -41,7 +41,8 @@
 
 enum CompilerBackend {
   kQuick,
-  kPortable
+  kPortable,
+  kNoBackend
 };
 
 // Thread-local storage compiler worker threads
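The new kNoBackend enumerator's call sites fall outside this section; presumably it marks compilation paths that run analysis without constructing a code generator. An illustrative use (DescribeBackend is a hypothetical helper, not part of this change):

    // Illustrative only: shows the three-way split the enum now allows.
    enum CompilerBackend { kQuick, kPortable, kNoBackend };

    const char* DescribeBackend(CompilerBackend be) {
      switch (be) {
        case kQuick:     return "Quick: MIR lowered to LIR by Mir2Lir";
        case kPortable:  return "Portable: MIR lowered to LLVM bitcode";
        case kNoBackend: return "no backend: analysis only, no code emitted";
      }
      return "unknown";
    }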
diff --git a/src/compiler/llvm/gbc_expander.cc b/src/compiler/llvm/gbc_expander.cc
index 24f2032..94eb741 100644
--- a/src/compiler/llvm/gbc_expander.cc
+++ b/src/compiler/llvm/gbc_expander.cc
@@ -28,7 +28,7 @@
 
 #include "compiler/dex/mir_graph.h"
 #include "compiler/dex/compiler_ir.h"
-#include "compiler/dex/quick/codegen.h"
+#include "compiler/dex/quick/mir_to_lir.h"
 using art::kMIRIgnoreNullCheck;
 using art::kMIRIgnoreRangeCheck;
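One more note: MIRGraph::ComputeFrameSize disappears in the vreg_analysis hunk above. Frame layout is Quick-specific, so the computation presumably resurfaces on the Mir2Lir side in a hunk not shown here. Its alignment arithmetic, reproduced in isolation (AlignedFrameSize is an illustrative name, and the 16-byte alignment value is an assumption):

    #include <cstdint>

    // Mirrors the deleted MIRGraph::ComputeFrameSize body; kStackAlignment
    // must be a power of two for the mask trick to hold, and its value here
    // is assumed for illustration.
    static const uint32_t kStackAlignment = 16;

    uint32_t AlignedFrameSize(uint32_t core_spills, uint32_t fp_spills,
                              uint32_t regs, uint32_t outs, uint32_t temps) {
      uint32_t size = (core_spills + fp_spills +
                       1 /* filler word */ + regs + outs +
                       temps + 1 /* cur_method* */) * sizeof(uint32_t);
      const uint32_t kAlignMask = kStackAlignment - 1;
      return (size + kAlignMask) & ~kAlignMask;
    }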