Compiler: Spring cleaning

Significant restructuring of the Quick compiler to break out the
common frontend more cleanly.  Additional C++'ification.

The goal is to move from the monolithic structure of the old
JIT towards a more modular model in which components - in
particular the compiler backend - can be replaced.  This CL
focuses on moving MIR-related data from the CompilationUnit
struct into a new MIRGraph class.  The next CL will isolate all
LIR-related data and code down into the Quick backend.

This change will happen in multiple steps, and may look uglier
before it starts looking better.

Among the changes:

   o Moved all mir-related fields from CompilationUnit to new
     MirGraph class.

   o Moved the register promotion stuff into the Quick backend.

   o Deleted the GBC to LIR conversion code.

   o Replaced the old C-style function pointer dataflow analysis
     dispatcher with a basic block iterator class.

   o Renamed some files to make the names more consistent with what
     the code actually does.

   o Added the foundation for future inlining support.

   o Stripped out the remains of the old fingerprinting mechanism.

Change-Id: I6c30facc642f8084b1c7b2075cf7014de387aa56
diff --git a/src/compiler/dex/quick/arm/call_arm.cc b/src/compiler/dex/quick/arm/call_arm.cc
index d3a3a7c..a201fd8 100644
--- a/src/compiler/dex/quick/arm/call_arm.cc
+++ b/src/compiler/dex/quick/arm/call_arm.cc
@@ -28,7 +28,7 @@
 /* Return the position of an ssa name within the argument list */
 static int InPosition(CompilationUnit* cu, int s_reg)
 {
-  int v_reg = SRegToVReg(cu, s_reg);
+  int v_reg = cu->mir_graph->SRegToVReg(s_reg);
   return v_reg - cu->num_regs;
 }
 
@@ -89,7 +89,7 @@
   int first_in = cu->num_regs;
   const int num_arg_regs = 3;  // TODO: generalize & move to RegUtil.cc
   for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
-    int v_reg = SRegToVReg(cu, mir->ssa_rep->uses[i]);
+    int v_reg = cu->mir_graph->SRegToVReg(mir->ssa_rep->uses[i]);
     int InPosition = v_reg - first_in;
     if (InPosition < num_arg_regs) {
       LockTemp(cu, rARM_ARG1 + InPosition);
@@ -324,7 +324,8 @@
  *   add   rARM_PC, r_disp   ; This is the branch from which we compute displacement
  *   cbnz  r_idx, lp
  */
-void ArmCodegen::GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
+void ArmCodegen::GenSparseSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
+                                 RegLocation rl_src)
 {
   const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
   if (cu->verbose) {
@@ -371,7 +372,8 @@
 }
 
 
-void ArmCodegen::GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
+void ArmCodegen::GenPackedSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
+                                 RegLocation rl_src)
 {
   const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
   if (cu->verbose) {
@@ -588,7 +590,7 @@
    * We can safely skip the stack overflow check if we're
    * a leaf *and* our frame size < fudge factor.
    */
-  bool skip_overflow_check = ((cu->attrs & METHOD_IS_LEAF) &&
+  bool skip_overflow_check = ((cu->attributes & METHOD_IS_LEAF) &&
                             (static_cast<size_t>(cu->frame_size) <
                             Thread::kStackOverflowReservedBytes));
   NewLIR0(cu, kPseudoMethodEntry);
diff --git a/src/compiler/dex/quick/arm/codegen_arm.h b/src/compiler/dex/quick/arm/codegen_arm.h
index e77394c..29aef0e 100644
--- a/src/compiler/dex/quick/arm/codegen_arm.h
+++ b/src/compiler/dex/quick/arm/codegen_arm.h
@@ -150,9 +150,9 @@
                                                int second_bit);
     virtual void GenNegDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
     virtual void GenNegFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
-    virtual void GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset,
+    virtual void GenPackedSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
                                  RegLocation rl_src);
-    virtual void GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset,
+    virtual void GenSparseSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
                                  RegLocation rl_src);
     virtual void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
                                 SpecialCaseHandler special_case);
diff --git a/src/compiler/dex/quick/arm/int_arm.cc b/src/compiler/dex/quick/arm/int_arm.cc
index ed65636..0ebc943 100644
--- a/src/compiler/dex/quick/arm/int_arm.cc
+++ b/src/compiler/dex/quick/arm/int_arm.cc
@@ -187,11 +187,11 @@
   RegLocation rl_src = GetSrc(cu, mir, 0);
   // Temporary debugging code
   int dest_sreg = mir->ssa_rep->defs[0];
-  if ((dest_sreg < 0) || (dest_sreg >= cu->num_ssa_regs)) {
+  if ((dest_sreg < 0) || (dest_sreg >= cu->mir_graph->GetNumSSARegs())) {
     LOG(INFO) << "Bad target sreg: " << dest_sreg << ", in "
               << PrettyMethod(cu->method_idx,*cu->dex_file);
     LOG(INFO) << "at dex offset 0x" << std::hex << mir->offset;
-    LOG(INFO) << "vreg = " << SRegToVReg(cu, dest_sreg);
+    LOG(INFO) << "vreg = " << cu->mir_graph->SRegToVReg(dest_sreg);
     LOG(INFO) << "num uses = " << mir->ssa_rep->num_uses;
     if (mir->ssa_rep->num_uses == 1) {
       LOG(INFO) << "CONST case, vals = " << mir->dalvikInsn.vB << ", " << mir->dalvikInsn.vC;
@@ -265,7 +265,7 @@
   if (rl_src2.is_const) {
     RegLocation rl_temp = UpdateLocWide(cu, rl_src2);
     // Do special compare/branch against simple const operand if not already in registers.
-    int64_t val = ConstantValueWide(cu, rl_src2);
+    int64_t val = cu->mir_graph->ConstantValueWide(rl_src2);
     if ((rl_temp.location != kLocPhysReg) &&
         ((ModifiedImmediate(Low32Bits(val)) >= 0) && (ModifiedImmediate(High32Bits(val)) >= 0))) {
       GenFusedLongCmpImmBranch(cu, bb, rl_src1, val, ccode);
@@ -538,7 +538,7 @@
   RegLocation rl_object = LoadValue(cu, rl_src_obj, kCoreReg);
   RegLocation rl_new_value = LoadValue(cu, rl_src_new_value, kCoreReg);
 
-  if (need_write_barrier && !IsConstantNullRef(cu, rl_new_value)) {
+  if (need_write_barrier && !cu->mir_graph->IsConstantNullRef(rl_new_value)) {
     // Mark card for object assuming new value is stored.
     MarkGCCard(cu, rl_new_value.low_reg, rl_object.low_reg);
   }
@@ -678,7 +678,7 @@
 {
   DCHECK(rl_src.wide);
   DCHECK(rl_dest.wide);
-  return (abs(SRegToVReg(cu, rl_src.s_reg_low) - SRegToVReg(cu, rl_dest.s_reg_low)) == 1);
+  return (abs(cu->mir_graph->SRegToVReg(rl_src.s_reg_low) - cu->mir_graph->SRegToVReg(rl_dest.s_reg_low)) == 1);
 }
 
 void ArmCodegen::GenMulLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
@@ -809,7 +809,7 @@
 
   // If index is constant, just fold it into the data offset
   if (constant_index) {
-    data_offset += ConstantValue(cu, rl_index) << scale;
+    data_offset += cu->mir_graph->ConstantValue(rl_index) << scale;
   }
 
   /* null object? */
@@ -837,7 +837,7 @@
 
     if (needs_range_check) {
       if (constant_index) {
-        GenImmedCheck(cu, kCondLs, reg_len, ConstantValue(cu, rl_index), kThrowConstantArrayBounds);
+        GenImmedCheck(cu, kCondLs, reg_len, cu->mir_graph->ConstantValue(rl_index), kThrowConstantArrayBounds);
       } else {
         GenRegRegCheck(cu, kCondLs, reg_len, rl_index.low_reg, kThrowArrayBounds);
       }
@@ -895,7 +895,7 @@
 
   // If index is constant, just fold it into the data offset.
   if (constant_index) {
-    data_offset += ConstantValue(cu, rl_index) << scale;
+    data_offset += cu->mir_graph->ConstantValue(rl_index) << scale;
   }
 
   rl_array = LoadValue(cu, rl_array, kCoreReg);
@@ -937,7 +937,7 @@
     }
     if (needs_range_check) {
       if (constant_index) {
-        GenImmedCheck(cu, kCondLs, reg_len, ConstantValue(cu, rl_index), kThrowConstantArrayBounds);
+        GenImmedCheck(cu, kCondLs, reg_len, cu->mir_graph->ConstantValue(rl_index), kThrowConstantArrayBounds);
       } else {
         GenRegRegCheck(cu, kCondLs, reg_len, rl_index.low_reg, kThrowArrayBounds);
       }
@@ -1021,7 +1021,7 @@
   StoreBaseIndexed(cu, r_ptr, r_index, r_value, scale, kWord);
   FreeTemp(cu, r_ptr);
   FreeTemp(cu, r_index);
-  if (!IsConstantNullRef(cu, rl_src)) {
+  if (!cu->mir_graph->IsConstantNullRef(rl_src)) {
     MarkGCCard(cu, r_value, r_array);
   }
 }
@@ -1031,7 +1031,7 @@
 {
   rl_src = LoadValueWide(cu, rl_src, kCoreReg);
   // Per spec, we only care about low 6 bits of shift amount.
-  int shift_amount = ConstantValue(cu, rl_shift) & 0x3f;
+  int shift_amount = cu->mir_graph->ConstantValue(rl_shift) & 0x3f;
   if (shift_amount == 0) {
     StoreValueWide(cu, rl_dest, rl_src);
     return;
@@ -1123,7 +1123,7 @@
     return;
   }
   DCHECK(rl_src2.is_const);
-  int64_t val = ConstantValueWide(cu, rl_src2);
+  int64_t val = cu->mir_graph->ConstantValueWide(rl_src2);
   uint32_t val_lo = Low32Bits(val);
   uint32_t val_hi = High32Bits(val);
   int32_t mod_imm_lo = ModifiedImmediate(val_lo);
diff --git a/src/compiler/dex/quick/arm/target_arm.cc b/src/compiler/dex/quick/arm/target_arm.cc
index f03e07c..6d8102f 100644
--- a/src/compiler/dex/quick/arm/target_arm.cc
+++ b/src/compiler/dex/quick/arm/target_arm.cc
@@ -597,7 +597,7 @@
   }
 }
 /*
- * TUNING: is leaf?  Can't just use "has_invoke" to determine as some
+ * TUNING: is true leaf?  Can't just use METHOD_IS_LEAF to determine as some
  * instructions might call out to C/assembly helper functions.  Until
  * machinery is in place, always spill lr.
  */
@@ -645,10 +645,10 @@
 
     info1->dirty = false;
     info2->dirty = false;
-    if (SRegToVReg(cu, info2->s_reg) <
-      SRegToVReg(cu, info1->s_reg))
+    if (cu->mir_graph->SRegToVReg(info2->s_reg) <
+      cu->mir_graph->SRegToVReg(info1->s_reg))
       info1 = info2;
-    int v_reg = SRegToVReg(cu, info1->s_reg);
+    int v_reg = cu->mir_graph->SRegToVReg(info1->s_reg);
     StoreBaseDispWide(cu, rARM_SP, VRegOffset(cu, v_reg), info1->reg, info1->partner);
   }
 }
@@ -658,7 +658,7 @@
   RegisterInfo* info = GetRegInfo(cu, reg);
   if (info->live && info->dirty) {
     info->dirty = false;
-    int v_reg = SRegToVReg(cu, info->s_reg);
+    int v_reg = cu->mir_graph->SRegToVReg(info->s_reg);
     StoreBaseDisp(cu, rARM_SP, VRegOffset(cu, v_reg), reg, kWord);
   }
 }
diff --git a/src/compiler/dex/quick/codegen.h b/src/compiler/dex/quick/codegen.h
index 21290ca..272ccad 100644
--- a/src/compiler/dex/quick/codegen.h
+++ b/src/compiler/dex/quick/codegen.h
@@ -105,8 +105,26 @@
 
   public:
 
+    struct SwitchTable {
+      int offset;
+      const uint16_t* table;      // Original dex table.
+      int vaddr;                  // Dalvik offset of switch opcode.
+      LIR* anchor;                // Reference instruction for relative offsets.
+      LIR** targets;              // Array of case targets.
+    };
+
+    struct FillArrayData {
+      int offset;
+      const uint16_t* table;      // Original dex table.
+      int size;
+      int vaddr;                  // Dalvik offset of FILL_ARRAY_DATA opcode.
+    };
+
     virtual ~Codegen(){};
 
+    // Shared by all targets - implemented in ralloc_util.cc
+    void SimpleRegAlloc(CompilationUnit* cu);
+
     // Shared by all targets - implemented in gen_common.cc.
     void HandleSuspendLaunchPads(CompilationUnit *cu);
     void HandleIntrinsicLaunchPads(CompilationUnit *cu);
@@ -355,9 +373,9 @@
                                                int second_bit) = 0;
     virtual void GenNegDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src) = 0;
     virtual void GenNegFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src) = 0;
-    virtual void GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset,
+    virtual void GenPackedSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
                                  RegLocation rl_src) = 0;
-    virtual void GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset,
+    virtual void GenSparseSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
                                  RegLocation rl_src) = 0;
     virtual void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
                                 SpecialCaseHandler special_case) = 0;
diff --git a/src/compiler/dex/quick/codegen_util.cc b/src/compiler/dex/quick/codegen_util.cc
index 24955f6..b5152df 100644
--- a/src/compiler/dex/quick/codegen_util.cc
+++ b/src/compiler/dex/quick/codegen_util.cc
@@ -30,15 +30,15 @@
   if (rl_src.is_const) {
     if (rl_src.wide) {
       if (rl_src.fp) {
-         res = cu->cg->InexpensiveConstantDouble(ConstantValueWide(cu, rl_src));
+         res = cu->cg->InexpensiveConstantDouble(cu->mir_graph->ConstantValueWide(rl_src));
       } else {
-         res = cu->cg->InexpensiveConstantLong(ConstantValueWide(cu, rl_src));
+         res = cu->cg->InexpensiveConstantLong(cu->mir_graph->ConstantValueWide(rl_src));
       }
     } else {
       if (rl_src.fp) {
-         res = cu->cg->InexpensiveConstantFloat(ConstantValue(cu, rl_src));
+         res = cu->cg->InexpensiveConstantFloat(cu->mir_graph->ConstantValue(rl_src));
       } else {
-         res = cu->cg->InexpensiveConstantInt(ConstantValue(cu, rl_src));
+         res = cu->cg->InexpensiveConstantInt(cu->mir_graph->ConstantValue(rl_src));
       }
     }
   }
@@ -55,9 +55,8 @@
 bool FastInstance(CompilationUnit* cu,  uint32_t field_idx,
                   int& field_offset, bool& is_volatile, bool is_put)
 {
-  DexCompilationUnit m_unit(cu);
-  return cu->compiler_driver->ComputeInstanceFieldInfo(field_idx, &m_unit,
-           field_offset, is_volatile, is_put);
+  return cu->compiler_driver->ComputeInstanceFieldInfo(
+      field_idx, cu->mir_graph->GetCurrentDexCompilationUnit(), field_offset, is_volatile, is_put);
 }
 
 /* Convert an instruction to a NOP */
@@ -336,7 +335,7 @@
   LOG(INFO) << "Dumping LIR insns for "
             << PrettyMethod(cu->method_idx, *cu->dex_file);
   LIR* lir_insn;
-  int insns_size = cu->insns_size;
+  int insns_size = cu->code_item->insns_size_in_code_units_;
 
   LOG(INFO) << "Regs (excluding ins) : " << cu->num_regs;
   LOG(INFO) << "Ins          : " << cu->num_ins;
@@ -595,7 +594,8 @@
   GrowableListIterator iterator;
   GrowableListIteratorInit(&cu->switch_tables, &iterator);
   while (true) {
-    SwitchTable* tab_rec = reinterpret_cast<SwitchTable*>(GrowableListIteratorNext( &iterator));
+    Codegen::SwitchTable* tab_rec =
+      reinterpret_cast<Codegen::SwitchTable*>(GrowableListIteratorNext( &iterator));
     if (tab_rec == NULL) break;
     AlignBuffer(cu->code_buffer, tab_rec->offset);
     /*
@@ -654,8 +654,8 @@
   GrowableListIterator iterator;
   GrowableListIteratorInit(&cu->fill_array_data, &iterator);
   while (true) {
-    FillArrayData *tab_rec =
-        reinterpret_cast<FillArrayData*>(GrowableListIteratorNext( &iterator));
+    Codegen::FillArrayData *tab_rec =
+        reinterpret_cast<Codegen::FillArrayData*>(GrowableListIteratorNext( &iterator));
     if (tab_rec == NULL) break;
     AlignBuffer(cu->code_buffer, tab_rec->offset);
     for (int i = 0; i < (tab_rec->size + 1) / 2; i++) {
@@ -678,7 +678,8 @@
 static bool VerifyCatchEntries(CompilationUnit* cu)
 {
   bool success = true;
-  for (std::set<uint32_t>::const_iterator it = cu->catches.begin(); it != cu->catches.end(); ++it) {
+  for (std::set<uint32_t>::const_iterator it = cu->mir_graph->catches_.begin();
+       it != cu->mir_graph->catches_.end(); ++it) {
     uint32_t dex_pc = *it;
     bool found = false;
     for (size_t i = 0; i < cu->dex2pcMappingTable.size(); i += 2) {
@@ -695,19 +696,20 @@
   // Now, try in the other direction
   for (size_t i = 0; i < cu->dex2pcMappingTable.size(); i += 2) {
     uint32_t dex_pc = cu->dex2pcMappingTable[i+1];
-    if (cu->catches.find(dex_pc) == cu->catches.end()) {
+    if (cu->mir_graph->catches_.find(dex_pc) == cu->mir_graph->catches_.end()) {
       LOG(INFO) << "Unexpected catch entry @ dex pc 0x" << std::hex << dex_pc;
       success = false;
     }
   }
   if (!success) {
     LOG(INFO) << "Bad dex2pcMapping table in " << PrettyMethod(cu->method_idx, *cu->dex_file);
-    LOG(INFO) << "Entries @ decode: " << cu->catches.size() << ", Entries in table: "
+    LOG(INFO) << "Entries @ decode: " << cu->mir_graph->catches_.size() << ", Entries in table: "
               << cu->dex2pcMappingTable.size()/2;
   }
   return success;
 }
 
+
 static void CreateMappingTables(CompilationUnit* cu)
 {
   for (LIR* tgt_lir = cu->first_lir_insn; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
@@ -720,7 +722,9 @@
       cu->dex2pcMappingTable.push_back(tgt_lir->dalvik_offset);
     }
   }
-  DCHECK(VerifyCatchEntries(cu));
+  if (kIsDebugBuild) {
+    DCHECK(VerifyCatchEntries(cu));
+  }
   cu->combined_mapping_table.push_back(cu->pc2dexMappingTable.size() +
                                         cu->dex2pcMappingTable.size());
   cu->combined_mapping_table.push_back(cu->pc2dexMappingTable.size());
@@ -850,7 +854,8 @@
   GrowableListIterator iterator;
   GrowableListIteratorInit(&cu->switch_tables, &iterator);
   while (true) {
-    SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(GrowableListIteratorNext(&iterator));
+    Codegen::SwitchTable *tab_rec =
+        reinterpret_cast<Codegen::SwitchTable*>(GrowableListIteratorNext(&iterator));
     if (tab_rec == NULL) break;
     tab_rec->offset = offset;
     if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
@@ -869,8 +874,8 @@
   GrowableListIterator iterator;
   GrowableListIteratorInit(&cu->fill_array_data, &iterator);
   while (true) {
-    FillArrayData *tab_rec =
-        reinterpret_cast<FillArrayData*>(GrowableListIteratorNext(&iterator));
+    Codegen::FillArrayData *tab_rec =
+        reinterpret_cast<Codegen::FillArrayData*>(GrowableListIteratorNext(&iterator));
     if (tab_rec == NULL) break;
     tab_rec->offset = offset;
     offset += tab_rec->size;
@@ -938,6 +943,7 @@
 {
   Codegen* cg = cu->cg.get();
   AssignOffsets(cu);
+  int assembler_retries = 0;
   /*
    * Assemble here.  Note that we generate code with optimistic assumptions
    * and if found now to work, we'll have to redo the sequence and retry.
@@ -948,8 +954,8 @@
     if (res == kSuccess) {
       break;
     } else {
-      cu->assembler_retries++;
-      if (cu->assembler_retries > MAX_ASSEMBLER_RETRIES) {
+      assembler_retries++;
+      if (assembler_retries > MAX_ASSEMBLER_RETRIES) {
         CodegenDump(cu);
         LOG(FATAL) << "Assembler error - too many retries";
       }
@@ -996,7 +1002,7 @@
   return new_label;
 }
 
-static void MarkPackedCaseLabels(CompilationUnit* cu, SwitchTable *tab_rec)
+static void MarkPackedCaseLabels(CompilationUnit* cu, Codegen::SwitchTable *tab_rec)
 {
   const uint16_t* table = tab_rec->table;
   int base_vaddr = tab_rec->vaddr;
@@ -1008,7 +1014,7 @@
   }
 }
 
-static void MarkSparseCaseLabels(CompilationUnit* cu, SwitchTable *tab_rec)
+static void MarkSparseCaseLabels(CompilationUnit* cu, Codegen::SwitchTable *tab_rec)
 {
   const uint16_t* table = tab_rec->table;
   int base_vaddr = tab_rec->vaddr;
@@ -1025,8 +1031,8 @@
   GrowableListIterator iterator;
   GrowableListIteratorInit(&cu->switch_tables, &iterator);
   while (true) {
-    SwitchTable *tab_rec =
-        reinterpret_cast<SwitchTable*>(GrowableListIteratorNext(&iterator));
+    Codegen::SwitchTable *tab_rec =
+        reinterpret_cast<Codegen::SwitchTable*>(GrowableListIteratorNext(&iterator));
     if (tab_rec == NULL) break;
     if (tab_rec->table[0] == Instruction::kPackedSwitchSignature) {
       MarkPackedCaseLabels(cu, tab_rec);
diff --git a/src/compiler/dex/quick/gen_common.cc b/src/compiler/dex/quick/gen_common.cc
index 3c4b111..652a448 100644
--- a/src/compiler/dex/quick/gen_common.cc
+++ b/src/compiler/dex/quick/gen_common.cc
@@ -16,6 +16,7 @@
 
 #include "compiler/dex/quick/codegen_util.h"
 #include "compiler/dex/compiler_ir.h"
+#include "compiler/dex/compiler_internals.h"
 #include "oat/runtime/oat_support_entrypoints.h"
 #include "ralloc_util.h"
 
@@ -132,9 +133,9 @@
     // If it's already live in a register or not easily materialized, just keep going
     RegLocation rl_temp = UpdateLoc(cu, rl_src2);
     if ((rl_temp.location == kLocDalvikFrame) &&
-        InexpensiveConstantInt(ConstantValue(cu, rl_src2))) {
+        InexpensiveConstantInt(cu->mir_graph->ConstantValue(rl_src2))) {
       // OK - convert this to a compare immediate and branch
-      OpCmpImmBranch(cu, cond, rl_src1.low_reg, ConstantValue(cu, rl_src2), taken);
+      OpCmpImmBranch(cu, cond, rl_src1.low_reg, cu->mir_graph->ConstantValue(rl_src2), taken);
       OpUnconditionalBranch(cu, fall_through);
       return;
     }
@@ -353,14 +354,9 @@
   int ssb_index;
   bool is_volatile;
   bool is_referrers_class;
-
-  DexCompilationUnit m_unit(cu);
-
-  bool fast_path =
-      cu->compiler_driver->ComputeStaticFieldInfo(field_idx, &m_unit,
-                                                  field_offset, ssb_index,
-                                                  is_referrers_class, is_volatile,
-                                                  true);
+  bool fast_path = cu->compiler_driver->ComputeStaticFieldInfo(
+      field_idx, cu->mir_graph->GetCurrentDexCompilationUnit(), field_offset, ssb_index,
+      is_referrers_class, is_volatile, true);
   if (fast_path && !SLOW_FIELD_PATH) {
     DCHECK_GE(field_offset, 0);
     int rBase;
@@ -424,7 +420,7 @@
     if (is_volatile) {
       GenMemBarrier(cu, kStoreLoad);
     }
-    if (is_object && !IsConstantNullRef(cu, rl_src)) {
+    if (is_object && !cu->mir_graph->IsConstantNullRef(rl_src)) {
       MarkGCCard(cu, rl_src.low_reg, rBase);
     }
     FreeTemp(cu, rBase);
@@ -444,14 +440,9 @@
   int ssb_index;
   bool is_volatile;
   bool is_referrers_class;
-
-  DexCompilationUnit m_unit(cu);
-
-  bool fast_path =
-      cu->compiler_driver->ComputeStaticFieldInfo(field_idx, &m_unit,
-                                                  field_offset, ssb_index,
-                                                  is_referrers_class, is_volatile,
-                                                  false);
+  bool fast_path = cu->compiler_driver->ComputeStaticFieldInfo(
+      field_idx, cu->mir_graph->GetCurrentDexCompilationUnit(), field_offset, ssb_index,
+      is_referrers_class, is_volatile, false);
   if (fast_path && !SLOW_FIELD_PATH) {
     DCHECK_GE(field_offset, 0);
     int rBase;
@@ -762,7 +753,7 @@
       if (is_volatile) {
         GenMemBarrier(cu, kLoadLoad);
       }
-      if (is_object && !IsConstantNullRef(cu, rl_src)) {
+      if (is_object && !cu->mir_graph->IsConstantNullRef(rl_src)) {
         MarkGCCard(cu, rl_src.low_reg, rl_obj.low_reg);
       }
     }
diff --git a/src/compiler/dex/quick/gen_invoke.cc b/src/compiler/dex/quick/gen_invoke.cc
index 1ae29be..c654143 100644
--- a/src/compiler/dex/quick/gen_invoke.cc
+++ b/src/compiler/dex/quick/gen_invoke.cc
@@ -1336,18 +1336,14 @@
   // Explicit register usage
   LockCallTemps(cu);
 
-  DexCompilationUnit m_unit(cu);
-
   uint32_t dex_method_idx = info->index;
   int vtable_idx;
   uintptr_t direct_code;
   uintptr_t direct_method;
   bool skip_this;
-  bool fast_path =
-    cu->compiler_driver->ComputeInvokeInfo(dex_method_idx, &m_unit, info->type,
-                                           vtable_idx, direct_code,
-                                           direct_method)
-    && !SLOW_INVOKE_PATH;
+  bool fast_path = cu->compiler_driver->ComputeInvokeInfo(
+      dex_method_idx, cu->mir_graph->GetCurrentDexCompilationUnit(), info->type, vtable_idx,
+      direct_code, direct_method) && !SLOW_INVOKE_PATH;
   if (info->type == kInterface) {
     if (fast_path) {
       p_null_ck = &null_ck;
@@ -1450,7 +1446,7 @@
                                   bool is_range)
 {
   CallInfo* info = static_cast<CallInfo*>(NewMem(cu, sizeof(CallInfo), true, kAllocMisc));
-  MIR* move_result_mir = FindMoveResult(cu, bb, mir);
+  MIR* move_result_mir = cu->mir_graph->FindMoveResult(bb, mir);
   if (move_result_mir == NULL) {
     info->result.location = kLocInvalid;
   } else {
diff --git a/src/compiler/dex/quick/gen_loadstore.cc b/src/compiler/dex/quick/gen_loadstore.cc
index a7baea4..7e116fc 100644
--- a/src/compiler/dex/quick/gen_loadstore.cc
+++ b/src/compiler/dex/quick/gen_loadstore.cc
@@ -16,6 +16,7 @@
 
 #include "compiler/dex/quick/codegen_util.h"
 #include "compiler/dex/compiler_ir.h"
+#include "compiler/dex/compiler_internals.h"
 #include "invoke_type.h"
 #include "ralloc_util.h"
 
@@ -50,9 +51,9 @@
       if (!cu->gen_bitcode) {
         // TUNING: We no longer have this info for QuickGBC - assume the worst
         bool used_as_reference = false;
-        int base_vreg = SRegToVReg(cu, rl_dest.s_reg_low);
-        for (int i = 0; !used_as_reference && (i < cu->num_ssa_regs); i++) {
-          if (SRegToVReg(cu, cu->reg_location[i].s_reg_low) == base_vreg) {
+        int base_vreg = cu->mir_graph->SRegToVReg(rl_dest.s_reg_low);
+        for (int i = 0; !used_as_reference && (i < cu->mir_graph->GetNumSSARegs()); i++) {
+          if (cu->mir_graph->SRegToVReg(cu->reg_location[i].s_reg_low) == base_vreg) {
             used_as_reference |= cu->reg_location[i].ref;
           }
         }
@@ -102,7 +103,7 @@
   if (rl_src.location == kLocPhysReg) {
     OpRegCopy(cu, r_dest, rl_src.low_reg);
   } else if (IsInexpensiveConstant(cu, rl_src)) {
-    LoadConstantNoClobber(cu, r_dest, ConstantValue(cu, rl_src));
+    LoadConstantNoClobber(cu, r_dest, cu->mir_graph->ConstantValue(rl_src));
   } else {
     DCHECK((rl_src.location == kLocDalvikFrame) ||
            (rl_src.location == kLocCompilerTemp));
@@ -134,7 +135,7 @@
   if (rl_src.location == kLocPhysReg) {
     OpRegCopyWide(cu, reg_lo, reg_hi, rl_src.low_reg, rl_src.high_reg);
   } else if (IsInexpensiveConstant(cu, rl_src)) {
-    LoadConstantWide(cu, reg_lo, reg_hi, ConstantValueWide(cu, rl_src));
+    LoadConstantWide(cu, reg_lo, reg_hi, cu->mir_graph->ConstantValueWide(rl_src));
   } else {
     DCHECK((rl_src.location == kLocDalvikFrame) ||
            (rl_src.location == kLocCompilerTemp));
@@ -171,16 +172,16 @@
 
 void Codegen::StoreValue(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
 {
-#ifndef NDEBUG
   /*
    * Sanity checking - should never try to store to the same
    * ssa name during the compilation of a single instruction
    * without an intervening ClobberSReg().
    */
-  DCHECK((cu->live_sreg == INVALID_SREG) ||
-         (rl_dest.s_reg_low != cu->live_sreg));
-  cu->live_sreg = rl_dest.s_reg_low;
-#endif
+  if (kIsDebugBuild) {
+    DCHECK((cu->live_sreg == INVALID_SREG) ||
+           (rl_dest.s_reg_low != cu->live_sreg));
+    cu->live_sreg = rl_dest.s_reg_low;
+  }
   LIR* def_start;
   LIR* def_end;
   DCHECK(!rl_dest.wide);
@@ -240,16 +241,16 @@
 
 void Codegen::StoreValueWide(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
 {
-#ifndef NDEBUG
   /*
    * Sanity checking - should never try to store to the same
    * ssa name during the compilation of a single instruction
    * without an intervening ClobberSReg().
    */
-  DCHECK((cu->live_sreg == INVALID_SREG) ||
-      (rl_dest.s_reg_low != cu->live_sreg));
-  cu->live_sreg = rl_dest.s_reg_low;
-#endif
+  if (kIsDebugBuild) {
+    DCHECK((cu->live_sreg == INVALID_SREG) ||
+           (rl_dest.s_reg_low != cu->live_sreg));
+    cu->live_sreg = rl_dest.s_reg_low;
+  }
   LIR* def_start;
   LIR* def_end;
   DCHECK_EQ(IsFpReg(rl_src.low_reg), IsFpReg(rl_src.high_reg));
@@ -291,8 +292,8 @@
       (oat_live_out(cu, rl_dest.s_reg_low) ||
       oat_live_out(cu, GetSRegHi(rl_dest.s_reg_low)))) {
     def_start = cu->last_lir_insn;
-    DCHECK_EQ((SRegToVReg(cu, rl_dest.s_reg_low)+1),
-              SRegToVReg(cu, GetSRegHi(rl_dest.s_reg_low)));
+    DCHECK_EQ((cu->mir_graph->SRegToVReg(rl_dest.s_reg_low)+1),
+              cu->mir_graph->SRegToVReg(GetSRegHi(rl_dest.s_reg_low)));
     StoreBaseDispWide(cu, TargetReg(kSp), SRegOffset(cu, rl_dest.s_reg_low),
                       rl_dest.low_reg, rl_dest.high_reg);
     MarkClean(cu, rl_dest);
diff --git a/src/compiler/dex/quick/mips/call_mips.cc b/src/compiler/dex/quick/mips/call_mips.cc
index 4fbb16b..d7f9dce 100644
--- a/src/compiler/dex/quick/mips/call_mips.cc
+++ b/src/compiler/dex/quick/mips/call_mips.cc
@@ -61,7 +61,8 @@
  * done:
  *
  */
-void MipsCodegen::GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
+void MipsCodegen::GenSparseSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
+                                  RegLocation rl_src)
 {
   const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
   if (cu->verbose) {
@@ -140,7 +141,8 @@
  *   jr    r_RA
  * done:
  */
-void MipsCodegen::GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
+void MipsCodegen::GenPackedSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
+                                  RegLocation rl_src)
 {
   const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
   if (cu->verbose) {
@@ -341,7 +343,7 @@
    * We can safely skip the stack overflow check if we're
    * a leaf *and* our frame size < fudge factor.
    */
-  bool skip_overflow_check = ((cu->attrs & METHOD_IS_LEAF) &&
+  bool skip_overflow_check = ((cu->attributes & METHOD_IS_LEAF) &&
       (static_cast<size_t>(cu->frame_size) < Thread::kStackOverflowReservedBytes));
   NewLIR0(cu, kPseudoMethodEntry);
   int check_reg = AllocTemp(cu);
diff --git a/src/compiler/dex/quick/mips/codegen_mips.h b/src/compiler/dex/quick/mips/codegen_mips.h
index f889ece..c9d0e21 100644
--- a/src/compiler/dex/quick/mips/codegen_mips.h
+++ b/src/compiler/dex/quick/mips/codegen_mips.h
@@ -151,9 +151,9 @@
                                                int second_bit);
     virtual void GenNegDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
     virtual void GenNegFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
-    virtual void GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset,
+    virtual void GenPackedSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
                                  RegLocation rl_src);
-    virtual void GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset,
+    virtual void GenSparseSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
                                  RegLocation rl_src);
     virtual void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
                                 SpecialCaseHandler special_case);
diff --git a/src/compiler/dex/quick/mips/int_mips.cc b/src/compiler/dex/quick/mips/int_mips.cc
index d648c44..b1fa623 100644
--- a/src/compiler/dex/quick/mips/int_mips.cc
+++ b/src/compiler/dex/quick/mips/int_mips.cc
@@ -637,7 +637,7 @@
   StoreBaseIndexed(cu, r_ptr, r_index, r_value, scale, kWord);
   FreeTemp(cu, r_ptr);
   FreeTemp(cu, r_index);
-  if (!IsConstantNullRef(cu, rl_src)) {
+  if (!cu->mir_graph->IsConstantNullRef(rl_src)) {
     MarkGCCard(cu, r_value, r_array);
   }
 }
diff --git a/src/compiler/dex/quick/mips/target_mips.cc b/src/compiler/dex/quick/mips/target_mips.cc
index ab6517c..85e8a9b 100644
--- a/src/compiler/dex/quick/mips/target_mips.cc
+++ b/src/compiler/dex/quick/mips/target_mips.cc
@@ -302,7 +302,7 @@
 }
 
 /*
- * TUNING: is leaf?  Can't just use "has_invoke" to determine as some
+ * TUNING: is true leaf?  Can't just use METHOD_IS_LEAF to determine as some
  * instructions might call out to C/assembly helper functions.  Until
  * machinery is in place, always spill lr.
  */
@@ -339,9 +339,9 @@
 
     info1->dirty = false;
     info2->dirty = false;
-    if (SRegToVReg(cu, info2->s_reg) < SRegToVReg(cu, info1->s_reg))
+    if (cu->mir_graph->SRegToVReg(info2->s_reg) < cu->mir_graph->SRegToVReg(info1->s_reg))
       info1 = info2;
-    int v_reg = SRegToVReg(cu, info1->s_reg);
+    int v_reg = cu->mir_graph->SRegToVReg(info1->s_reg);
     StoreBaseDispWide(cu, rMIPS_SP, VRegOffset(cu, v_reg), info1->reg, info1->partner);
   }
 }
@@ -351,7 +351,7 @@
   RegisterInfo* info = GetRegInfo(cu, reg);
   if (info->live && info->dirty) {
     info->dirty = false;
-    int v_reg = SRegToVReg(cu, info->s_reg);
+    int v_reg = cu->mir_graph->SRegToVReg(info->s_reg);
     StoreBaseDisp(cu, rMIPS_SP, VRegOffset(cu, v_reg), reg, kWord);
   }
 }
diff --git a/src/compiler/dex/quick/mir_to_lir.cc b/src/compiler/dex/quick/mir_to_lir.cc
index 3cce26e..0b85e92 100644
--- a/src/compiler/dex/quick/mir_to_lir.cc
+++ b/src/compiler/dex/quick/mir_to_lir.cc
@@ -17,6 +17,7 @@
 #include "object_utils.h"
 
 #include "compiler/dex/compiler_internals.h"
+#include "compiler/dex/dataflow_iterator.h"
 #include "local_optimizations.h"
 #include "codegen_util.h"
 #include "ralloc_util.h"
@@ -91,21 +92,21 @@
                                                           cu->class_def_idx)) {
         cg->GenMemBarrier(cu, kStoreStore);
       }
-      if (!(cu->attrs & METHOD_IS_LEAF)) {
+      if (!(cu->attributes & METHOD_IS_LEAF)) {
         cg->GenSuspendTest(cu, opt_flags);
       }
       break;
 
     case Instruction::RETURN:
     case Instruction::RETURN_OBJECT:
-      if (!(cu->attrs & METHOD_IS_LEAF)) {
+      if (!(cu->attributes & METHOD_IS_LEAF)) {
         cg->GenSuspendTest(cu, opt_flags);
       }
       cg->StoreValue(cu, GetReturn(cu, cu->shorty[0] == 'F'), rl_src[0]);
       break;
 
     case Instruction::RETURN_WIDE:
-      if (!(cu->attrs & METHOD_IS_LEAF)) {
+      if (!(cu->attributes & METHOD_IS_LEAF)) {
         cg->GenSuspendTest(cu, opt_flags);
       }
       cg->StoreValueWide(cu, GetReturnWide(cu,
@@ -253,11 +254,11 @@
       break;
 
     case Instruction::PACKED_SWITCH:
-      cg->GenPackedSwitch(cu, vB, rl_src[0]);
+      cg->GenPackedSwitch(cu, mir, vB, rl_src[0]);
       break;
 
     case Instruction::SPARSE_SWITCH:
-      cg->GenSparseSwitch(cu, vB, rl_src[0]);
+      cg->GenSparseSwitch(cu, mir, vB, rl_src[0]);
       break;
 
     case Instruction::CMPL_FLOAT:
@@ -283,8 +284,8 @@
       backward_branch = (bb->taken->start_offset <= mir->offset);
       // Result known at compile time?
       if (rl_src[0].is_const && rl_src[1].is_const) {
-        bool is_taken = EvaluateBranch(opcode, cu->constant_values[rl_src[0].orig_sreg],
-                                       cu->constant_values[rl_src[1].orig_sreg]);
+        bool is_taken = EvaluateBranch(opcode, cu->mir_graph->ConstantValue(rl_src[0].orig_sreg),
+                                       cu->mir_graph->ConstantValue(rl_src[1].orig_sreg));
         if (is_taken && backward_branch) {
           cg->GenSuspendTest(cu, opt_flags);
         }
@@ -312,7 +313,7 @@
       backward_branch = (bb->taken->start_offset <= mir->offset);
       // Result known at compile time?
       if (rl_src[0].is_const) {
-        bool is_taken = EvaluateBranch(opcode, cu->constant_values[rl_src[0].orig_sreg], 0);
+        bool is_taken = EvaluateBranch(opcode, cu->mir_graph->ConstantValue(rl_src[0].orig_sreg), 0);
         if (is_taken && backward_branch) {
           cg->GenSuspendTest(cu, opt_flags);
         }
@@ -540,13 +541,13 @@
     case Instruction::XOR_INT:
     case Instruction::XOR_INT_2ADDR:
       if (rl_src[0].is_const &&
-          cu->cg->InexpensiveConstantInt(ConstantValue(cu, rl_src[0]))) {
+          cu->cg->InexpensiveConstantInt(cu->mir_graph->ConstantValue(rl_src[0]))) {
         cg->GenArithOpIntLit(cu, opcode, rl_dest, rl_src[1],
-                             cu->constant_values[rl_src[0].orig_sreg]);
+                             cu->mir_graph->ConstantValue(rl_src[0].orig_sreg));
       } else if (rl_src[1].is_const &&
-          cu->cg->InexpensiveConstantInt(ConstantValue(cu, rl_src[1]))) {
+          cu->cg->InexpensiveConstantInt(cu->mir_graph->ConstantValue(rl_src[1]))) {
         cg->GenArithOpIntLit(cu, opcode, rl_dest, rl_src[0],
-                             cu->constant_values[rl_src[1].orig_sreg]);
+                             cu->mir_graph->ConstantValue(rl_src[1].orig_sreg));
       } else {
         cg->GenArithOpInt(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
       }
@@ -565,8 +566,8 @@
     case Instruction::USHR_INT:
     case Instruction::USHR_INT_2ADDR:
       if (rl_src[1].is_const &&
-          cu->cg->InexpensiveConstantInt(ConstantValue(cu, rl_src[1]))) {
-        cg->GenArithOpIntLit(cu, opcode, rl_dest, rl_src[0], ConstantValue(cu, rl_src[1]));
+          cu->cg->InexpensiveConstantInt(cu->mir_graph->ConstantValue(rl_src[1]))) {
+        cg->GenArithOpIntLit(cu, opcode, rl_dest, rl_src[0], cu->mir_graph->ConstantValue(rl_src[1]));
       } else {
         cg->GenArithOpInt(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
       }
@@ -707,7 +708,6 @@
   LIR* label_list = cu->block_label_list;
   int block_id = bb->id;
 
-  cu->cur_block = bb;
   label_list[block_id].operands[0] = bb->start_offset;
 
   // Insert the block label.
@@ -745,10 +745,10 @@
       ResetDefTracking(cu);
     }
 
-#ifndef NDEBUG
     // Reset temp tracking sanity check.
-    cu->live_sreg = INVALID_SREG;
-#endif
+    if (kIsDebugBuild) {
+      cu->live_sreg = INVALID_SREG;
+    }
 
     cu->current_dalvik_offset = mir->offset;
     int opcode = mir->dalvikInsn.opcode;
@@ -800,12 +800,12 @@
 {
   Codegen* cg = cu->cg.get();
   // Find the first DalvikByteCode block.
-  int num_reachable_blocks = cu->num_reachable_blocks;
-  const GrowableList *block_list = &cu->block_list;
+  int num_reachable_blocks = cu->mir_graph->GetNumReachableBlocks();
   BasicBlock*bb = NULL;
   for (int idx = 0; idx < num_reachable_blocks; idx++) {
-    int dfs_index = cu->dfs_order.elem_list[idx];
-    bb = reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, dfs_index));
+    // TODO: no direct access of growable lists.
+    int dfs_index = cu->mir_graph->GetDfsOrder()->elem_list[idx];
+    bb = cu->mir_graph->GetBasicBlock(dfs_index);
     if (bb->block_type == kDalvikByteCode) {
       break;
     }
@@ -832,10 +832,12 @@
   Codegen* cg = cu->cg.get();
   // Hold the labels of each block.
   cu->block_label_list =
-      static_cast<LIR*>(NewMem(cu, sizeof(LIR) * cu->num_blocks, true, kAllocLIR));
+      static_cast<LIR*>(NewMem(cu, sizeof(LIR) * cu->mir_graph->GetNumBlocks(), true, kAllocLIR));
 
-  DataFlowAnalysisDispatcher(cu, MethodBlockCodeGen,
-                                kPreOrderDFSTraversal, false /* Iterative */);
+  DataflowIterator iter(cu->mir_graph.get(), kPreOrderDFSTraversal, false /* not iterative */);
+  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+    MethodBlockCodeGen(cu, bb);
+  }
 
   cg->HandleSuspendLaunchPads(cu);
 
diff --git a/src/compiler/dex/quick/ralloc_util.cc b/src/compiler/dex/quick/ralloc_util.cc
index 5b7de2c..18c7714 100644
--- a/src/compiler/dex/quick/ralloc_util.cc
+++ b/src/compiler/dex/quick/ralloc_util.cc
@@ -17,8 +17,9 @@
 /* This file contains register alloction support. */
 
 #include "compiler/dex/compiler_ir.h"
+#include "compiler/dex/compiler_internals.h"
 #include "compiler/dex/compiler_utility.h"
-#include "compiler/dex/dataflow.h"
+//#include "compiler/dex/dataflow.h"
 #include "compiler/dex/quick/codegen_util.h"
 #include "ralloc_util.h"
 
@@ -137,12 +138,12 @@
  */
 void ClobberSReg(CompilationUnit* cu, int s_reg)
 {
-#ifndef NDEBUG
   /* Reset live temp tracking sanity checker */
-  if (s_reg == cu->live_sreg) {
-    cu->live_sreg = INVALID_SREG;
+  if (kIsDebugBuild) {
+    if (s_reg == cu->live_sreg) {
+      cu->live_sreg = INVALID_SREG;
+    }
   }
-#endif
   ClobberSRegBody(cu->reg_pool->core_regs, cu->reg_pool->num_core_regs, s_reg);
   ClobberSRegBody(cu->reg_pool->FPRegs, cu->reg_pool->num_fp_regs, s_reg);
 }
@@ -158,9 +159,9 @@
  */
 int SRegToPMap(CompilationUnit* cu, int s_reg)
 {
-  DCHECK_LT(s_reg, cu->num_ssa_regs);
+  DCHECK_LT(s_reg, cu->mir_graph->GetNumSSARegs());
   DCHECK_GE(s_reg, 0);
-  int v_reg = SRegToVReg(cu, s_reg);
+  int v_reg = cu->mir_graph->SRegToVReg(s_reg);
   if (v_reg >= 0) {
     DCHECK_LT(v_reg, cu->num_dalvik_registers);
     return v_reg;
@@ -175,7 +176,7 @@
 {
   Codegen* cg = cu->cg.get();
   int p_map_idx = SRegToPMap(cu, s_reg);
-  int v_reg = SRegToVReg(cu, s_reg);
+  int v_reg = cu->mir_graph->SRegToVReg(s_reg);
   cg->GetRegInfo(cu, reg)->in_use = true;
   cu->core_spill_mask |= (1 << reg);
   // Include reg for later sort
@@ -205,7 +206,7 @@
 {
   Codegen* cg = cu->cg.get();
   int p_map_idx = SRegToPMap(cu, s_reg);
-  int v_reg = SRegToVReg(cu, s_reg);
+  int v_reg = cu->mir_graph->SRegToVReg(s_reg);
   cg->GetRegInfo(cu, reg)->in_use = true;
   cg->MarkPreservedSingle(cu, v_reg, reg);
   cu->promotion_map[p_map_idx].fp_location = kLocPhysReg;
@@ -244,7 +245,7 @@
 {
   Codegen* cg = cu->cg.get();
   int res = -1; // Assume failure
-  int v_reg = SRegToVReg(cu, s_reg);
+  int v_reg = cu->mir_graph->SRegToVReg(s_reg);
   int p_map_idx = SRegToPMap(cu, s_reg);
   if (cu->promotion_map[p_map_idx+1].fp_location == kLocPhysReg) {
     // Upper reg is already allocated.  Can we fit?
@@ -1088,13 +1089,13 @@
       (bb->block_type == kDalvikByteCode))) {
     return;
   }
-  for (int i = 0; i < cu->num_ssa_regs; i++) {
+  for (int i = 0; i < cu->mir_graph->GetNumSSARegs(); i++) {
     RegLocation loc = cu->reg_location[i];
     RefCounts* counts = loc.fp ? fp_counts : core_counts;
     int p_map_idx = SRegToPMap(cu, loc.s_reg_low);
     //Don't count easily regenerated immediates
     if (loc.fp || !IsInexpensiveConstant(cu, loc)) {
-      counts[p_map_idx].count += cu->raw_use_counts.elem_list[i];
+      counts[p_map_idx].count += cu->mir_graph->GetUseCount(i);
     }
     if (loc.wide && loc.fp && !loc.high_word) {
       counts[p_map_idx].double_start = true;
@@ -1162,8 +1163,7 @@
     FpRegs[dalvik_regs + i].s_reg = ct->s_reg;
   }
 
-  GrowableListIterator iterator;
-  GrowableListIteratorInit(&cu->block_list, &iterator);
+  GrowableListIterator iterator = cu->mir_graph->GetBasicBlockIterator();
   while (true) {
     BasicBlock* bb;
     bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iterator));
@@ -1217,19 +1217,10 @@
         }
       }
     }
-  } else if (cu->qd_mode) {
-    AllocPreservedCoreReg(cu, cu->method_sreg);
-    for (int i = 0; i < num_regs; i++) {
-      int reg = AllocPreservedCoreReg(cu, i);
-      if (reg < 0) {
-         break;  // No more left
-      }
-    }
   }
 
-
   // Now, update SSA names to new home locations
-  for (int i = 0; i < cu->num_ssa_regs; i++) {
+  for (int i = 0; i < cu->mir_graph->GetNumSSARegs(); i++) {
     RegLocation *curr = &cu->reg_location[i];
     int p_map_idx = SRegToPMap(cu, curr->s_reg_low);
     if (!curr->wide) {
@@ -1292,7 +1283,7 @@
 /* Returns sp-relative offset in bytes for a SReg */
 int SRegOffset(CompilationUnit* cu, int s_reg)
 {
-  return VRegOffset(cu, SRegToVReg(cu, s_reg));
+  return VRegOffset(cu, cu->mir_graph->SRegToVReg(s_reg));
 }
 
 RegLocation GetBadLoc()
@@ -1331,4 +1322,20 @@
   return res;
 }
 
+void Codegen::SimpleRegAlloc(CompilationUnit* cu)
+{
+  DoPromotion(cu);
+
+  /* Get easily-accessible post-promotion copy of RegLocation for Method* */
+  cu->method_loc = cu->reg_location[cu->method_sreg];
+
+  if (cu->verbose && !(cu->disable_opt & (1 << kPromoteRegs))) {
+    LOG(INFO) << "After Promotion";
+    cu->mir_graph->DumpRegLocTable(cu->reg_location, cu->mir_graph->GetNumSSARegs());
+  }
+
+  /* Set the frame size */
+  cu->frame_size = cu->mir_graph->ComputeFrameSize();
+}
+
 }  // namespace art
diff --git a/src/compiler/dex/quick/ralloc_util.h b/src/compiler/dex/quick/ralloc_util.h
index 3287047..1f99600 100644
--- a/src/compiler/dex/quick/ralloc_util.h
+++ b/src/compiler/dex/quick/ralloc_util.h
@@ -23,7 +23,6 @@
 
 #include "compiler/dex/compiler_ir.h"
 #include "compiler/dex/compiler_utility.h"
-#include "compiler/dex/dataflow.h"
 
 namespace art {
 
@@ -155,7 +154,6 @@
 int SRegOffset(CompilationUnit* cu, int reg);
 void RecordCorePromotion(CompilationUnit* cu, int reg, int s_reg);
 void RecordFpPromotion(CompilationUnit* cu, int reg, int s_reg);
-int ComputeFrameSize(CompilationUnit* cu);
 int SRegToPMap(CompilationUnit* cu, int s_reg);
 void DumpRegPool(RegisterInfo* p, int num_regs);
 
diff --git a/src/compiler/dex/quick/x86/assemble_x86.cc b/src/compiler/dex/quick/x86/assemble_x86.cc
index 9ee0cb5..2369e49 100644
--- a/src/compiler/dex/quick/x86/assemble_x86.cc
+++ b/src/compiler/dex/quick/x86/assemble_x86.cc
@@ -1099,11 +1099,11 @@
                       int base_or_table, uint8_t index, int scale, int table_or_disp) {
   int disp;
   if (entry->opcode == kX86PcRelLoadRA) {
-    SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(table_or_disp);
+    Codegen::SwitchTable *tab_rec = reinterpret_cast<Codegen::SwitchTable*>(table_or_disp);
     disp = tab_rec->offset;
   } else {
     DCHECK(entry->opcode == kX86PcRelAdr);
-    FillArrayData *tab_rec = reinterpret_cast<FillArrayData*>(base_or_table);
+    Codegen::FillArrayData *tab_rec = reinterpret_cast<Codegen::FillArrayData*>(base_or_table);
     disp = tab_rec->offset;
   }
   if (entry->skeleton.prefix1 != 0) {
diff --git a/src/compiler/dex/quick/x86/call_x86.cc b/src/compiler/dex/quick/x86/call_x86.cc
index 7b1a7fb..f30e966 100644
--- a/src/compiler/dex/quick/x86/call_x86.cc
+++ b/src/compiler/dex/quick/x86/call_x86.cc
@@ -33,7 +33,8 @@
  * The sparse table in the literal pool is an array of <key,displacement>
  * pairs.
  */
-void X86Codegen::GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
+void X86Codegen::GenSparseSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
+                                 RegLocation rl_src)
 {
   const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
   if (cu->verbose) {
@@ -45,7 +46,8 @@
   rl_src = LoadValue(cu, rl_src, kCoreReg);
   for (int i = 0; i < entries; i++) {
     int key = keys[i];
-    BasicBlock* case_block = FindBlock(cu, cu->current_dalvik_offset + targets[i]);
+    BasicBlock* case_block =
+        cu->mir_graph.get()->FindBlock(cu->current_dalvik_offset + targets[i]);
     LIR* label_list = cu->block_label_list;
     OpCmpImmBranch(cu, kCondEq, rl_src.low_reg, key,
                    &label_list[case_block->id]);
@@ -68,7 +70,8 @@
  * jmp  r_start_of_method
  * done:
  */
-void X86Codegen::GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
+void X86Codegen::GenPackedSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
+                                 RegLocation rl_src)
 {
   const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
   if (cu->verbose) {
@@ -237,7 +240,7 @@
    * We can safely skip the stack overflow check if we're
    * a leaf *and* our frame size < fudge factor.
    */
-  bool skip_overflow_check = ((cu->attrs & METHOD_IS_LEAF) &&
+  bool skip_overflow_check = ((cu->attributes & METHOD_IS_LEAF) &&
                 (static_cast<size_t>(cu->frame_size) <
                 Thread::kStackOverflowReservedBytes));
   NewLIR0(cu, kPseudoMethodEntry);
diff --git a/src/compiler/dex/quick/x86/codegen_x86.h b/src/compiler/dex/quick/x86/codegen_x86.h
index c1e8fb3..35c976a 100644
--- a/src/compiler/dex/quick/x86/codegen_x86.h
+++ b/src/compiler/dex/quick/x86/codegen_x86.h
@@ -151,9 +151,9 @@
                                                int second_bit);
     virtual void GenNegDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
     virtual void GenNegFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
-    virtual void GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset,
+    virtual void GenPackedSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
                                  RegLocation rl_src);
-    virtual void GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset,
+    virtual void GenSparseSwitch(CompilationUnit* cu, MIR* mir, uint32_t table_offset,
                                  RegLocation rl_src);
     virtual void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
                                 SpecialCaseHandler special_case);
diff --git a/src/compiler/dex/quick/x86/int_x86.cc b/src/compiler/dex/quick/x86/int_x86.cc
index 81b1d72..984eaef 100644
--- a/src/compiler/dex/quick/x86/int_x86.cc
+++ b/src/compiler/dex/quick/x86/int_x86.cc
@@ -585,7 +585,7 @@
   StoreBaseIndexedDisp(cu, r_array, r_index, scale,
                        data_offset, r_value, INVALID_REG, kWord, INVALID_SREG);
   FreeTemp(cu, r_index);
-  if (!IsConstantNullRef(cu, rl_src)) {
+  if (!cu->mir_graph->IsConstantNullRef(rl_src)) {
     MarkGCCard(cu, r_value, r_array);
   }
 }
diff --git a/src/compiler/dex/quick/x86/target_x86.cc b/src/compiler/dex/quick/x86/target_x86.cc
index cb41fde..ed07220 100644
--- a/src/compiler/dex/quick/x86/target_x86.cc
+++ b/src/compiler/dex/quick/x86/target_x86.cc
@@ -341,9 +341,9 @@
 
     info1->dirty = false;
     info2->dirty = false;
-    if (SRegToVReg(cu, info2->s_reg) < SRegToVReg(cu, info1->s_reg))
+    if (cu->mir_graph->SRegToVReg(info2->s_reg) < cu->mir_graph->SRegToVReg(info1->s_reg))
       info1 = info2;
-    int v_reg = SRegToVReg(cu, info1->s_reg);
+    int v_reg = cu->mir_graph->SRegToVReg(info1->s_reg);
     StoreBaseDispWide(cu, rX86_SP, VRegOffset(cu, v_reg), info1->reg, info1->partner);
   }
 }
@@ -353,7 +353,7 @@
   RegisterInfo* info = GetRegInfo(cu, reg);
   if (info->live && info->dirty) {
     info->dirty = false;
-    int v_reg = SRegToVReg(cu, info->s_reg);
+    int v_reg = cu->mir_graph->SRegToVReg(info->s_reg);
     StoreBaseDisp(cu, rX86_SP, VRegOffset(cu, v_reg), reg, kWord);
   }
 }