Revert "Rework Quick compiler's register handling"

This reverts commit 2c1ed456dcdb027d097825dd98dbe48c71599b6c.
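
The revert switches the x86 Quick backend back from the RegStorage-based
accessors (rl.reg.GetReg() / rl.reg.GetHighReg() / rl.reg.SetReg()) to the
legacy low_reg / high_reg fields on RegLocation, and restores
AllocTypedTempPair() in place of AllocTypedTempWide(). As a minimal,
non-compilable sketch of the mechanical substitution (both lines are lifted
from GenMoveException in call_x86.cc below; nothing here is new API):

    // Reworked form being reverted: the register number is read from the
    // RegStorage wrapper held inside the location.
    NewLIR2(kX86Mov32RT, rl_result.reg.GetReg(), ex_offset);

    // Restored legacy form: the raw register number is stored directly
    // on the RegLocation.
    NewLIR2(kX86Mov32RT, rl_result.low_reg, ex_offset);

Wide (64-bit) values again carry an explicit high_reg, and where a single
int must describe both halves of a pair the restored code packs it as
(low_reg & 0xff) | ((high_reg & 0xff) << 8), as in AllocTypedTempPair().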

Change-Id: If88d69ba88e0af0b407ff2240566d7e4545d8a99
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 3708f01..0613cdf 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -40,7 +40,7 @@
     int key = keys[i];
     BasicBlock* case_block =
         mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]);
-    OpCmpImmBranch(kCondEq, rl_src.reg.GetReg(), key,
+    OpCmpImmBranch(kCondEq, rl_src.low_reg, key,
                    &block_label_list_[case_block->id]);
   }
 }
@@ -87,7 +87,7 @@
     // We can use the saved value.
     RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
     rl_method = LoadValue(rl_method, kCoreReg);
-    start_of_method_reg = rl_method.reg.GetReg();
+    start_of_method_reg = rl_method.low_reg;
     store_method_addr_used_ = true;
   } else {
     start_of_method_reg = AllocTemp();
@@ -97,10 +97,10 @@
   int keyReg;
   // Remove the bias, if necessary
   if (low_key == 0) {
-    keyReg = rl_src.reg.GetReg();
+    keyReg = rl_src.low_reg;
   } else {
     keyReg = AllocTemp();
-    OpRegRegImm(kOpSub, keyReg, rl_src.reg.GetReg(), low_key);
+    OpRegRegImm(kOpSub, keyReg, rl_src.low_reg, low_key);
   }
   // Bounds check - if < 0 or >= size continue following switch
   OpRegImm(kOpCmp, keyReg, size-1);
@@ -164,7 +164,7 @@
 void X86Mir2Lir::GenMoveException(RegLocation rl_dest) {
   int ex_offset = Thread::ExceptionOffset().Int32Value();
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  NewLIR2(kX86Mov32RT, rl_result.reg.GetReg(), ex_offset);
+  NewLIR2(kX86Mov32RT, rl_result.low_reg, ex_offset);
   NewLIR2(kX86Mov32TI, ex_offset, 0);
   StoreValue(rl_dest, rl_result);
 }
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 55f18ef..421d51e 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -48,9 +48,8 @@
     // Required for target - register utilities.
     bool IsFpReg(int reg);
     bool SameRegType(int reg1, int reg2);
-    // TODO: for consistency, make this return a RegStorage as well?
     int AllocTypedTemp(bool fp_hint, int reg_class);
-    RegStorage AllocTypedTempWide(bool fp_hint, int reg_class);
+    int AllocTypedTempPair(bool fp_hint, int reg_class);
     int S2d(int low_reg, int high_reg);
     int TargetReg(SpecialTargetRegister reg);
     int GetArgMappingToPhysicalReg(int arg_num);
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 1827901..4c2ecc0 100644
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -63,9 +63,9 @@
   rl_src1 = LoadValue(rl_src1, kFPReg);
   rl_src2 = LoadValue(rl_src2, kFPReg);
   rl_result = EvalLoc(rl_dest, kFPReg, true);
-  int r_dest = rl_result.reg.GetReg();
-  int r_src1 = rl_src1.reg.GetReg();
-  int r_src2 = rl_src2.reg.GetReg();
+  int r_dest = rl_result.low_reg;
+  int r_src1 = rl_src1.low_reg;
+  int r_src2 = rl_src2.low_reg;
   if (r_dest == r_src2) {
     r_src2 = AllocTempFloat();
     OpRegCopy(r_src2, r_dest);
@@ -118,9 +118,9 @@
   rl_result = EvalLoc(rl_dest, kFPReg, true);
   DCHECK(rl_dest.wide);
   DCHECK(rl_result.wide);
-  int r_dest = S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg());
-  int r_src1 = S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg());
-  int r_src2 = S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg());
+  int r_dest = S2d(rl_result.low_reg, rl_result.high_reg);
+  int r_src1 = S2d(rl_src1.low_reg, rl_src1.high_reg);
+  int r_src2 = S2d(rl_src2.low_reg, rl_src2.high_reg);
   if (r_dest == r_src2) {
     r_src2 = AllocTempDouble() | X86_FP_DOUBLE;
     OpRegCopy(r_src2, r_dest);
@@ -140,7 +140,7 @@
 
   // If the source is in physical register, then put it in its location on stack.
   if (rl_src.location == kLocPhysReg) {
-    RegisterInfo* lo_info = GetRegInfo(rl_src.reg.GetReg());
+    RegisterInfo* lo_info = GetRegInfo(rl_src.low_reg);
 
     if (lo_info != nullptr && lo_info->is_temp) {
       // Calling FlushSpecificReg because it will only write back VR if it is dirty.
@@ -148,7 +148,7 @@
     } else {
       // It must have been register promoted if it is not a temp but is still in physical
       // register. Since we need it to be in memory to convert, we place it there now.
-      StoreBaseDispWide(TargetReg(kSp), src_v_reg_offset, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+      StoreBaseDispWide(TargetReg(kSp), src_v_reg_offset, rl_src.low_reg, rl_src.high_reg);
     }
   }
 
@@ -181,13 +181,13 @@
     if (is_double) {
       rl_result = EvalLocWide(rl_dest, kFPReg, true);
 
-      LoadBaseDispWide(TargetReg(kSp), dest_v_reg_offset, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG);
+      LoadBaseDispWide(TargetReg(kSp), dest_v_reg_offset, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
 
       StoreValueWide(rl_dest, rl_result);
     } else {
       rl_result = EvalLoc(rl_dest, kFPReg, true);
 
-      LoadWordDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg.GetReg());
+      LoadWordDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.low_reg);
 
       StoreValue(rl_dest, rl_result);
     }
@@ -219,21 +219,21 @@
       break;
     case Instruction::FLOAT_TO_INT: {
       rl_src = LoadValue(rl_src, kFPReg);
-      src_reg = rl_src.reg.GetReg();
+      src_reg = rl_src.low_reg;
       // In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
       ClobberSReg(rl_dest.s_reg_low);
       rl_result = EvalLoc(rl_dest, kCoreReg, true);
       int temp_reg = AllocTempFloat();
 
-      LoadConstant(rl_result.reg.GetReg(), 0x7fffffff);
-      NewLIR2(kX86Cvtsi2ssRR, temp_reg, rl_result.reg.GetReg());
+      LoadConstant(rl_result.low_reg, 0x7fffffff);
+      NewLIR2(kX86Cvtsi2ssRR, temp_reg, rl_result.low_reg);
       NewLIR2(kX86ComissRR, src_reg, temp_reg);
       LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondA);
       LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
-      NewLIR2(kX86Cvttss2siRR, rl_result.reg.GetReg(), src_reg);
+      NewLIR2(kX86Cvttss2siRR, rl_result.low_reg, src_reg);
       LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
       branch_na_n->target = NewLIR0(kPseudoTargetLabel);
-      NewLIR2(kX86Xor32RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
+      NewLIR2(kX86Xor32RR, rl_result.low_reg, rl_result.low_reg);
       branch_pos_overflow->target = NewLIR0(kPseudoTargetLabel);
       branch_normal->target = NewLIR0(kPseudoTargetLabel);
       StoreValue(rl_dest, rl_result);
@@ -241,21 +241,21 @@
     }
     case Instruction::DOUBLE_TO_INT: {
       rl_src = LoadValueWide(rl_src, kFPReg);
-      src_reg = rl_src.reg.GetReg();
+      src_reg = rl_src.low_reg;
       // In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
       ClobberSReg(rl_dest.s_reg_low);
       rl_result = EvalLoc(rl_dest, kCoreReg, true);
       int temp_reg = AllocTempDouble() | X86_FP_DOUBLE;
 
-      LoadConstant(rl_result.reg.GetReg(), 0x7fffffff);
-      NewLIR2(kX86Cvtsi2sdRR, temp_reg, rl_result.reg.GetReg());
+      LoadConstant(rl_result.low_reg, 0x7fffffff);
+      NewLIR2(kX86Cvtsi2sdRR, temp_reg, rl_result.low_reg);
       NewLIR2(kX86ComisdRR, src_reg, temp_reg);
       LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondA);
       LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
-      NewLIR2(kX86Cvttsd2siRR, rl_result.reg.GetReg(), src_reg);
+      NewLIR2(kX86Cvttsd2siRR, rl_result.low_reg, src_reg);
       LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
       branch_na_n->target = NewLIR0(kPseudoTargetLabel);
-      NewLIR2(kX86Xor32RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
+      NewLIR2(kX86Xor32RR, rl_result.low_reg, rl_result.low_reg);
       branch_pos_overflow->target = NewLIR0(kPseudoTargetLabel);
       branch_normal->target = NewLIR0(kPseudoTargetLabel);
       StoreValue(rl_dest, rl_result);
@@ -278,18 +278,18 @@
   }
   if (rl_src.wide) {
     rl_src = LoadValueWide(rl_src, rcSrc);
-    src_reg = S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+    src_reg = S2d(rl_src.low_reg, rl_src.high_reg);
   } else {
     rl_src = LoadValue(rl_src, rcSrc);
-    src_reg = rl_src.reg.GetReg();
+    src_reg = rl_src.low_reg;
   }
   if (rl_dest.wide) {
     rl_result = EvalLoc(rl_dest, kFPReg, true);
-    NewLIR2(op, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), src_reg);
+    NewLIR2(op, S2d(rl_result.low_reg, rl_result.high_reg), src_reg);
     StoreValueWide(rl_dest, rl_result);
   } else {
     rl_result = EvalLoc(rl_dest, kFPReg, true);
-    NewLIR2(op, rl_result.reg.GetReg(), src_reg);
+    NewLIR2(op, rl_result.low_reg, src_reg);
     StoreValue(rl_dest, rl_result);
   }
 }
@@ -302,19 +302,19 @@
   int src_reg2;
   if (single) {
     rl_src1 = LoadValue(rl_src1, kFPReg);
-    src_reg1 = rl_src1.reg.GetReg();
+    src_reg1 = rl_src1.low_reg;
     rl_src2 = LoadValue(rl_src2, kFPReg);
-    src_reg2 = rl_src2.reg.GetReg();
+    src_reg2 = rl_src2.low_reg;
   } else {
     rl_src1 = LoadValueWide(rl_src1, kFPReg);
-    src_reg1 = S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg());
+    src_reg1 = S2d(rl_src1.low_reg, rl_src1.high_reg);
     rl_src2 = LoadValueWide(rl_src2, kFPReg);
-    src_reg2 = S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg());
+    src_reg2 = S2d(rl_src2.low_reg, rl_src2.high_reg);
   }
   // In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
   ClobberSReg(rl_dest.s_reg_low);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  LoadConstantNoClobber(rl_result.reg.GetReg(), unordered_gt ? 1 : 0);
+  LoadConstantNoClobber(rl_result.low_reg, unordered_gt ? 1 : 0);
   if (single) {
     NewLIR2(kX86UcomissRR, src_reg1, src_reg2);
   } else {
@@ -325,20 +325,20 @@
     branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
   }
   // If the result reg can't be byte accessed, use a jump and move instead of a set.
-  if (rl_result.reg.GetReg() >= 4) {
+  if (rl_result.low_reg >= 4) {
     LIR* branch2 = NULL;
     if (unordered_gt) {
       branch2 = NewLIR2(kX86Jcc8, 0, kX86CondA);
-      NewLIR2(kX86Mov32RI, rl_result.reg.GetReg(), 0x0);
+      NewLIR2(kX86Mov32RI, rl_result.low_reg, 0x0);
     } else {
       branch2 = NewLIR2(kX86Jcc8, 0, kX86CondBe);
-      NewLIR2(kX86Mov32RI, rl_result.reg.GetReg(), 0x1);
+      NewLIR2(kX86Mov32RI, rl_result.low_reg, 0x1);
     }
     branch2->target = NewLIR0(kPseudoTargetLabel);
   } else {
-    NewLIR2(kX86Set8R, rl_result.reg.GetReg(), kX86CondA /* above - unsigned > */);
+    NewLIR2(kX86Set8R, rl_result.low_reg, kX86CondA /* above - unsigned > */);
   }
-  NewLIR2(kX86Sbb32RI, rl_result.reg.GetReg(), 0);
+  NewLIR2(kX86Sbb32RI, rl_result.low_reg, 0);
   if (unordered_gt) {
     branch->target = NewLIR0(kPseudoTargetLabel);
   }
@@ -357,14 +357,14 @@
     rl_src2 = mir_graph_->GetSrcWide(mir, 2);
     rl_src1 = LoadValueWide(rl_src1, kFPReg);
     rl_src2 = LoadValueWide(rl_src2, kFPReg);
-    NewLIR2(kX86UcomisdRR, S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg()),
-            S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg()));
+    NewLIR2(kX86UcomisdRR, S2d(rl_src1.low_reg, rl_src1.high_reg),
+            S2d(rl_src2.low_reg, rl_src2.high_reg));
   } else {
     rl_src1 = mir_graph_->GetSrc(mir, 0);
     rl_src2 = mir_graph_->GetSrc(mir, 1);
     rl_src1 = LoadValue(rl_src1, kFPReg);
     rl_src2 = LoadValue(rl_src2, kFPReg);
-    NewLIR2(kX86UcomissRR, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+    NewLIR2(kX86UcomissRR, rl_src1.low_reg, rl_src2.low_reg);
   }
   ConditionCode ccode = mir->meta.ccode;
   switch (ccode) {
@@ -418,7 +418,7 @@
   RegLocation rl_result;
   rl_src = LoadValue(rl_src, kCoreReg);
   rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  OpRegRegImm(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), 0x80000000);
+  OpRegRegImm(kOpAdd, rl_result.low_reg, rl_src.low_reg, 0x80000000);
   StoreValue(rl_dest, rl_result);
 }
 
@@ -426,8 +426,8 @@
   RegLocation rl_result;
   rl_src = LoadValueWide(rl_src, kCoreReg);
   rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  OpRegRegImm(kOpAdd, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), 0x80000000);
-  OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
+  OpRegRegImm(kOpAdd, rl_result.high_reg, rl_src.high_reg, 0x80000000);
+  OpRegCopy(rl_result.low_reg, rl_src.low_reg);
   StoreValueWide(rl_dest, rl_result);
 }
 
@@ -436,8 +436,8 @@
   RegLocation rl_dest = InlineTargetWide(info);  // double place for result
   rl_src = LoadValueWide(rl_src, kFPReg);
   RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
-  NewLIR2(kX86SqrtsdRR, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()),
-          S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg()));
+  NewLIR2(kX86SqrtsdRR, S2d(rl_result.low_reg, rl_result.high_reg),
+          S2d(rl_src.low_reg, rl_src.high_reg));
   StoreValueWide(rl_dest, rl_result);
   return true;
 }
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 362ab2e..5f04b7d 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -216,21 +216,21 @@
      *     mov t1, $false_case
      *     cmovnz result_reg, t1
      */
-    const bool result_reg_same_as_src = (rl_src.location == kLocPhysReg && rl_src.reg.GetReg() == rl_result.reg.GetReg());
+    const bool result_reg_same_as_src = (rl_src.location == kLocPhysReg && rl_src.low_reg == rl_result.low_reg);
     const bool true_zero_case = (true_val == 0 && false_val != 0 && !result_reg_same_as_src);
     const bool false_zero_case = (false_val == 0 && true_val != 0 && !result_reg_same_as_src);
     const bool catch_all_case = !(true_zero_case || false_zero_case);
 
     if (true_zero_case || false_zero_case) {
-      OpRegReg(kOpXor, rl_result.reg.GetReg(), rl_result.reg.GetReg());
+      OpRegReg(kOpXor, rl_result.low_reg, rl_result.low_reg);
     }
 
     if (true_zero_case || false_zero_case || catch_all_case) {
-      OpRegImm(kOpCmp, rl_src.reg.GetReg(), 0);
+      OpRegImm(kOpCmp, rl_src.low_reg, 0);
     }
 
     if (catch_all_case) {
-      OpRegImm(kOpMov, rl_result.reg.GetReg(), true_val);
+      OpRegImm(kOpMov, rl_result.low_reg, true_val);
     }
 
     if (true_zero_case || false_zero_case || catch_all_case) {
@@ -239,7 +239,7 @@
       OpRegImm(kOpMov, temp1_reg, immediateForTemp);
 
       ConditionCode cc = false_zero_case ? kCondEq : kCondNe;
-      OpCondRegReg(kOpCmov, cc, rl_result.reg.GetReg(), temp1_reg);
+      OpCondRegReg(kOpCmov, cc, rl_result.low_reg, temp1_reg);
 
       FreeTemp(temp1_reg);
     }
@@ -264,15 +264,15 @@
      */
 
     // kMirOpSelect is generated just for conditional cases when comparison is done with zero.
-    OpRegImm(kOpCmp, rl_src.reg.GetReg(), 0);
+    OpRegImm(kOpCmp, rl_src.low_reg, 0);
 
-    if (rl_result.reg.GetReg() == rl_true.reg.GetReg()) {
-      OpCondRegReg(kOpCmov, kCondNe, rl_result.reg.GetReg(), rl_false.reg.GetReg());
-    } else if (rl_result.reg.GetReg() == rl_false.reg.GetReg()) {
-      OpCondRegReg(kOpCmov, kCondEq, rl_result.reg.GetReg(), rl_true.reg.GetReg());
+    if (rl_result.low_reg == rl_true.low_reg) {
+      OpCondRegReg(kOpCmov, kCondNe, rl_result.low_reg, rl_false.low_reg);
+    } else if (rl_result.low_reg == rl_false.low_reg) {
+      OpCondRegReg(kOpCmov, kCondEq, rl_result.low_reg, rl_true.low_reg);
     } else {
-      OpRegCopy(rl_result.reg.GetReg(), rl_true.reg.GetReg());
-      OpCondRegReg(kOpCmov, kCondNe, rl_result.reg.GetReg(), rl_false.reg.GetReg());
+      OpRegCopy(rl_result.low_reg, rl_true.low_reg);
+      OpCondRegReg(kOpCmov, kCondNe, rl_result.low_reg, rl_false.low_reg);
     }
   }
 
@@ -337,8 +337,8 @@
   LIR* taken = &block_label_list_[bb->taken];
   LIR* not_taken = &block_label_list_[bb->fall_through];
   rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-  int32_t low_reg = rl_src1.reg.GetReg();
-  int32_t high_reg = rl_src1.reg.GetHighReg();
+  int32_t low_reg = rl_src1.low_reg;
+  int32_t high_reg = rl_src1.high_reg;
 
   if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
     int t_reg = AllocTemp();
@@ -461,7 +461,7 @@
 
   // Assume that the result will be in EDX.
   RegLocation rl_result = {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
-                           RegStorage(RegStorage::k32BitSolo, r2), INVALID_SREG, INVALID_SREG};
+                          r2, INVALID_REG, INVALID_SREG, INVALID_SREG};
 
   // handle div/rem by 1 special case.
   if (imm == 1) {
@@ -472,7 +472,7 @@
       // x % 1 == 0.
       LoadConstantNoClobber(r0, 0);
       // For this case, return the result in EAX.
-      rl_result.reg.SetReg(r0);
+      rl_result.low_reg = r0;
     }
   } else if (imm == -1) {  // handle 0x80000000 / -1 special case.
     if (is_div) {
@@ -494,7 +494,7 @@
       LoadConstantNoClobber(r0, 0);
     }
     // For this case, return the result in EAX.
-    rl_result.reg.SetReg(r0);
+    rl_result.low_reg = r0;
   } else {
     CHECK(imm <= -2 || imm >= 2);
     // Use H.S.Warren's Hacker's Delight Chapter 10 and
@@ -524,8 +524,8 @@
       // We will need the value later.
       if (rl_src.location == kLocPhysReg) {
         // We can use it directly.
-        DCHECK(rl_src.reg.GetReg() != r0 && rl_src.reg.GetReg() != r2);
-        numerator_reg = rl_src.reg.GetReg();
+        DCHECK(rl_src.low_reg != r0 && rl_src.low_reg != r2);
+        numerator_reg = rl_src.low_reg;
       } else {
         LoadValueDirectFixed(rl_src, r1);
         numerator_reg = r1;
@@ -582,7 +582,7 @@
       NewLIR2(kX86Sub32RR, r0, r2);
 
       // For this case, return the result in EAX.
-      rl_result.reg.SetReg(r0);
+      rl_result.low_reg = r0;
     }
   }
 
@@ -638,9 +638,9 @@
 
   // Result is in EAX for div and EDX for rem.
   RegLocation rl_result = {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
-                           RegStorage(RegStorage::k32BitSolo, r0), INVALID_SREG, INVALID_SREG};
+                          r0, INVALID_REG, INVALID_SREG, INVALID_SREG};
   if (!is_div) {
-    rl_result.reg.SetReg(r2);
+    rl_result.low_reg = r2;
   }
   return rl_result;
 }
@@ -662,22 +662,22 @@
    * The reason is that the first copy will inadvertently clobber the second element with
    * the first one thus yielding the wrong result. Thus we do a swap in that case.
    */
-  if (rl_result.reg.GetReg() == rl_src2.reg.GetReg()) {
+  if (rl_result.low_reg == rl_src2.low_reg) {
     std::swap(rl_src1, rl_src2);
   }
 
   // Pick the first integer as min/max.
-  OpRegCopy(rl_result.reg.GetReg(), rl_src1.reg.GetReg());
+  OpRegCopy(rl_result.low_reg, rl_src1.low_reg);
 
   // If the integers are both in the same register, then there is nothing else to do
   // because they are equal and we have already moved one into the result.
-  if (rl_src1.reg.GetReg() != rl_src2.reg.GetReg()) {
+  if (rl_src1.low_reg != rl_src2.low_reg) {
     // It is possible we didn't pick correctly so do the actual comparison now.
-    OpRegReg(kOpCmp, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+    OpRegReg(kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
 
     // Conditionally move the other integer into the destination register.
     ConditionCode condition_code = is_min ? kCondGt : kCondLt;
-    OpCondRegReg(kOpCmov, condition_code, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
+    OpCondRegReg(kOpCmov, condition_code, rl_result.low_reg, rl_src2.low_reg);
   }
 
   StoreValue(rl_dest, rl_result);
@@ -692,12 +692,12 @@
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   if (size == kLong) {
     // Unaligned access is allowed on x86.
-    LoadBaseDispWide(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG);
+    LoadBaseDispWide(rl_address.low_reg, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
     StoreValueWide(rl_dest, rl_result);
   } else {
     DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord);
     // Unaligned access is allowed on x86.
-    LoadBaseDisp(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), size, INVALID_SREG);
+    LoadBaseDisp(rl_address.low_reg, 0, rl_result.low_reg, size, INVALID_SREG);
     StoreValue(rl_dest, rl_result);
   }
   return true;
@@ -711,12 +711,12 @@
   if (size == kLong) {
     // Unaligned access is allowed on x86.
     RegLocation rl_value = LoadValueWide(rl_src_value, kCoreReg);
-    StoreBaseDispWide(rl_address.reg.GetReg(), 0, rl_value.reg.GetReg(), rl_value.reg.GetHighReg());
+    StoreBaseDispWide(rl_address.low_reg, 0, rl_value.low_reg, rl_value.high_reg);
   } else {
     DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord);
     // Unaligned access is allowed on x86.
     RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
-    StoreBaseDisp(rl_address.reg.GetReg(), 0, rl_value.reg.GetReg(), size);
+    StoreBaseDisp(rl_address.low_reg, 0, rl_value.low_reg, size);
   }
   return true;
 }
@@ -776,13 +776,13 @@
     if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
       // Mark card for object assuming new value is stored.
       FreeTemp(r0);  // Temporarily release EAX for MarkGCCard().
-      MarkGCCard(rl_new_value.reg.GetReg(), rl_object.reg.GetReg());
+      MarkGCCard(rl_new_value.low_reg, rl_object.low_reg);
       LockTemp(r0);
     }
 
     RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
     LoadValueDirect(rl_src_expected, r0);
-    NewLIR5(kX86LockCmpxchgAR, rl_object.reg.GetReg(), rl_offset.reg.GetReg(), 0, 0, rl_new_value.reg.GetReg());
+    NewLIR5(kX86LockCmpxchgAR, rl_object.low_reg, rl_offset.low_reg, 0, 0, rl_new_value.low_reg);
 
     FreeTemp(r0);
   }
@@ -790,8 +790,8 @@
   // Convert ZF to boolean
   RegLocation rl_dest = InlineTarget(info);  // boolean place for result
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  NewLIR2(kX86Set8R, rl_result.reg.GetReg(), kX86CondZ);
-  NewLIR2(kX86Movzx8RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
+  NewLIR2(kX86Set8R, rl_result.low_reg, kX86CondZ);
+  NewLIR2(kX86Movzx8RR, rl_result.low_reg, rl_result.low_reg);
   StoreValue(rl_dest, rl_result);
   return true;
 }
@@ -830,11 +830,11 @@
                                                RegLocation rl_result, int lit,
                                                int first_bit, int second_bit) {
   int t_reg = AllocTemp();
-  OpRegRegImm(kOpLsl, t_reg, rl_src.reg.GetReg(), second_bit - first_bit);
-  OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), t_reg);
+  OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, second_bit - first_bit);
+  OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, t_reg);
   FreeTemp(t_reg);
   if (first_bit != 0) {
-    OpRegRegImm(kOpLsl, rl_result.reg.GetReg(), rl_result.reg.GetReg(), first_bit);
+    OpRegRegImm(kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
   }
 }
 
@@ -918,8 +918,8 @@
     int64_t val = mir_graph_->ConstantValueWide(rl_src2);
     if (val == 0) {
       RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-      OpRegReg(kOpXor, rl_result.reg.GetReg(), rl_result.reg.GetReg());
-      OpRegReg(kOpXor, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg());
+      OpRegReg(kOpXor, rl_result.low_reg, rl_result.low_reg);
+      OpRegReg(kOpXor, rl_result.high_reg, rl_result.high_reg);
       StoreValueWide(rl_dest, rl_result);
       return;
     } else if (val == 1) {
@@ -952,8 +952,8 @@
     // ECX <- 1H * 2L
     // EAX <- 1L * 2H
     if (src1_in_reg) {
-      GenImulRegImm(r1, rl_src1.reg.GetHighReg(), val_lo);
-      GenImulRegImm(r0, rl_src1.reg.GetReg(), val_hi);
+      GenImulRegImm(r1, rl_src1.high_reg, val_lo);
+      GenImulRegImm(r0, rl_src1.low_reg, val_hi);
     } else {
       GenImulMemImm(r1, GetSRegHi(rl_src1.s_reg_low), displacement + HIWORD_OFFSET, val_lo);
       GenImulMemImm(r0, rl_src1.s_reg_low, displacement + LOWORD_OFFSET, val_hi);
@@ -967,7 +967,7 @@
 
     // EDX:EAX <- 2L * 1L (double precision)
     if (src1_in_reg) {
-      NewLIR1(kX86Mul32DaR, rl_src1.reg.GetReg());
+      NewLIR1(kX86Mul32DaR, rl_src1.low_reg);
     } else {
       LIR *m = NewLIR2(kX86Mul32DaM, rX86_SP, displacement + LOWORD_OFFSET);
       AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
@@ -978,8 +978,7 @@
     NewLIR2(kX86Add32RR, r2, r1);
 
     // Result is EDX:EAX
-    RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
-                             RegStorage(RegStorage::k64BitPair, r0, r2),
+    RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r0, r2,
                              INVALID_SREG, INVALID_SREG};
     StoreValueWide(rl_dest, rl_result);
     return;
@@ -1001,7 +1000,7 @@
 
   // ECX <- 1H
   if (src1_in_reg) {
-    NewLIR2(kX86Mov32RR, r1, rl_src1.reg.GetHighReg());
+    NewLIR2(kX86Mov32RR, r1, rl_src1.high_reg);
   } else {
     LoadBaseDisp(rX86_SP, SRegOffset(rl_src1.s_reg_low) + HIWORD_OFFSET, r1,
                  kWord, GetSRegHi(rl_src1.s_reg_low));
@@ -1011,7 +1010,7 @@
     // Take advantage of the fact that the values are the same.
     // ECX <- ECX * 2L  (1H * 2L)
     if (src2_in_reg) {
-      NewLIR2(kX86Imul32RR, r1, rl_src2.reg.GetReg());
+      NewLIR2(kX86Imul32RR, r1, rl_src2.low_reg);
     } else {
       int displacement = SRegOffset(rl_src2.s_reg_low);
       LIR *m = NewLIR3(kX86Imul32RM, r1, rX86_SP, displacement + LOWORD_OFFSET);
@@ -1024,7 +1023,7 @@
   } else {
     // EAX <- 2H
     if (src2_in_reg) {
-      NewLIR2(kX86Mov32RR, r0, rl_src2.reg.GetHighReg());
+      NewLIR2(kX86Mov32RR, r0, rl_src2.high_reg);
     } else {
       LoadBaseDisp(rX86_SP, SRegOffset(rl_src2.s_reg_low) + HIWORD_OFFSET, r0,
                    kWord, GetSRegHi(rl_src2.s_reg_low));
@@ -1032,7 +1031,7 @@
 
     // EAX <- EAX * 1L  (2H * 1L)
     if (src1_in_reg) {
-      NewLIR2(kX86Imul32RR, r0, rl_src1.reg.GetReg());
+      NewLIR2(kX86Imul32RR, r0, rl_src1.low_reg);
     } else {
       int displacement = SRegOffset(rl_src1.s_reg_low);
       LIR *m = NewLIR3(kX86Imul32RM, r0, rX86_SP, displacement + LOWORD_OFFSET);
@@ -1042,7 +1041,7 @@
 
     // ECX <- ECX * 2L  (1H * 2L)
     if (src2_in_reg) {
-      NewLIR2(kX86Imul32RR, r1, rl_src2.reg.GetReg());
+      NewLIR2(kX86Imul32RR, r1, rl_src2.low_reg);
     } else {
       int displacement = SRegOffset(rl_src2.s_reg_low);
       LIR *m = NewLIR3(kX86Imul32RM, r1, rX86_SP, displacement + LOWORD_OFFSET);
@@ -1056,7 +1055,7 @@
 
   // EAX <- 2L
   if (src2_in_reg) {
-    NewLIR2(kX86Mov32RR, r0, rl_src2.reg.GetReg());
+    NewLIR2(kX86Mov32RR, r0, rl_src2.low_reg);
   } else {
     LoadBaseDisp(rX86_SP, SRegOffset(rl_src2.s_reg_low) + LOWORD_OFFSET, r0,
                  kWord, rl_src2.s_reg_low);
@@ -1064,7 +1063,7 @@
 
   // EDX:EAX <- 2L * 1L (double precision)
   if (src1_in_reg) {
-    NewLIR1(kX86Mul32DaR, rl_src1.reg.GetReg());
+    NewLIR1(kX86Mul32DaR, rl_src1.low_reg);
   } else {
     int displacement = SRegOffset(rl_src1.s_reg_low);
     LIR *m = NewLIR2(kX86Mul32DaM, rX86_SP, displacement + LOWORD_OFFSET);
@@ -1076,8 +1075,8 @@
   NewLIR2(kX86Add32RR, r2, r1);
 
   // Result is EDX:EAX
-  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
-                           RegStorage(RegStorage::k64BitPair, r0, r2), INVALID_SREG, INVALID_SREG};
+  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r0, r2,
+                           INVALID_SREG, INVALID_SREG};
   StoreValueWide(rl_dest, rl_result);
 }
 
@@ -1087,18 +1086,18 @@
   X86OpCode x86op = GetOpcode(op, rl_dest, rl_src, false);
   if (rl_src.location == kLocPhysReg) {
     // Both operands are in registers.
-    if (rl_dest.reg.GetReg() == rl_src.reg.GetHighReg()) {
+    if (rl_dest.low_reg == rl_src.high_reg) {
       // The registers are the same, so we would clobber it before the use.
       int temp_reg = AllocTemp();
-      OpRegCopy(temp_reg, rl_dest.reg.GetReg());
-      rl_src.reg.SetHighReg(temp_reg);
+      OpRegCopy(temp_reg, rl_dest.low_reg);
+      rl_src.high_reg = temp_reg;
     }
-    NewLIR2(x86op, rl_dest.reg.GetReg(), rl_src.reg.GetReg());
+    NewLIR2(x86op, rl_dest.low_reg, rl_src.low_reg);
 
     x86op = GetOpcode(op, rl_dest, rl_src, true);
-    NewLIR2(x86op, rl_dest.reg.GetHighReg(), rl_src.reg.GetHighReg());
-    FreeTemp(rl_src.reg.GetReg());
-    FreeTemp(rl_src.reg.GetHighReg());
+    NewLIR2(x86op, rl_dest.high_reg, rl_src.high_reg);
+    FreeTemp(rl_src.low_reg);
+    FreeTemp(rl_src.high_reg);
     return;
   }
 
@@ -1108,11 +1107,11 @@
   int rBase = TargetReg(kSp);
   int displacement = SRegOffset(rl_src.s_reg_low);
 
-  LIR *lir = NewLIR3(x86op, rl_dest.reg.GetReg(), rBase, displacement + LOWORD_OFFSET);
+  LIR *lir = NewLIR3(x86op, rl_dest.low_reg, rBase, displacement + LOWORD_OFFSET);
   AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
                           true /* is_load */, true /* is64bit */);
   x86op = GetOpcode(op, rl_dest, rl_src, true);
-  lir = NewLIR3(x86op, rl_dest.reg.GetHighReg(), rBase, displacement + HIWORD_OFFSET);
+  lir = NewLIR3(x86op, rl_dest.high_reg, rBase, displacement + HIWORD_OFFSET);
   AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2,
                           true /* is_load */, true /* is64bit */);
 }
@@ -1139,15 +1138,15 @@
   int rBase = TargetReg(kSp);
   int displacement = SRegOffset(rl_dest.s_reg_low);
 
-  LIR *lir = NewLIR3(x86op, rBase, displacement + LOWORD_OFFSET, rl_src.reg.GetReg());
+  LIR *lir = NewLIR3(x86op, rBase, displacement + LOWORD_OFFSET, rl_src.low_reg);
   AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
                           false /* is_load */, true /* is64bit */);
   x86op = GetOpcode(op, rl_dest, rl_src, true);
-  lir = NewLIR3(x86op, rBase, displacement + HIWORD_OFFSET, rl_src.reg.GetHighReg());
+  lir = NewLIR3(x86op, rBase, displacement + HIWORD_OFFSET, rl_src.high_reg);
   AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2,
                           false /* is_load */, true /* is64bit */);
-  FreeTemp(rl_src.reg.GetReg());
-  FreeTemp(rl_src.reg.GetHighReg());
+  FreeTemp(rl_src.low_reg);
+  FreeTemp(rl_src.high_reg);
 }
 
 void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src1,
@@ -1189,12 +1188,12 @@
 
   // Get one of the source operands into temporary register.
   rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-  if (IsTemp(rl_src1.reg.GetReg()) && IsTemp(rl_src1.reg.GetHighReg())) {
+  if (IsTemp(rl_src1.low_reg) && IsTemp(rl_src1.high_reg)) {
     GenLongRegOrMemOp(rl_src1, rl_src2, op);
   } else if (is_commutative) {
     rl_src2 = LoadValueWide(rl_src2, kCoreReg);
     // We need at least one of them to be a temporary.
-    if (!(IsTemp(rl_src2.reg.GetReg()) && IsTemp(rl_src2.reg.GetHighReg()))) {
+    if (!(IsTemp(rl_src2.low_reg) && IsTemp(rl_src2.high_reg))) {
       rl_src1 = ForceTempWide(rl_src1);
     }
     GenLongRegOrMemOp(rl_src1, rl_src2, op);
@@ -1235,16 +1234,15 @@
 void X86Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
   rl_src = LoadValueWide(rl_src, kCoreReg);
   RegLocation rl_result = ForceTempWide(rl_src);
-  if (((rl_dest.location == kLocPhysReg) && (rl_src.location == kLocPhysReg)) &&
-      ((rl_dest.reg.GetReg() == rl_src.reg.GetHighReg()))) {
+  if (rl_dest.low_reg == rl_src.high_reg) {
     // The registers are the same, so we would clobber it before the use.
     int temp_reg = AllocTemp();
-    OpRegCopy(temp_reg, rl_result.reg.GetReg());
-    rl_result.reg.SetHighReg(temp_reg);
+    OpRegCopy(temp_reg, rl_result.low_reg);
+    rl_result.high_reg = temp_reg;
   }
-  OpRegReg(kOpNeg, rl_result.reg.GetReg(), rl_result.reg.GetReg());    // rLow = -rLow
-  OpRegImm(kOpAdc, rl_result.reg.GetHighReg(), 0);                   // rHigh = rHigh + CF
-  OpRegReg(kOpNeg, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg());  // rHigh = -rHigh
+  OpRegReg(kOpNeg, rl_result.low_reg, rl_result.low_reg);    // rLow = -rLow
+  OpRegImm(kOpAdc, rl_result.high_reg, 0);                   // rHigh = rHigh + CF
+  OpRegReg(kOpNeg, rl_result.high_reg, rl_result.high_reg);  // rHigh = -rHigh
   StoreValueWide(rl_dest, rl_result);
 }
 
@@ -1286,29 +1284,29 @@
     // If index is constant, just fold it into the data offset
     data_offset += constant_index_value << scale;
     // treat as non array below
-    rl_index.reg = RegStorage(RegStorage::k32BitSolo, INVALID_REG);
+    rl_index.low_reg = INVALID_REG;
   }
 
   /* null object? */
-  GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
+  GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
 
   if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
     if (constant_index) {
-      GenMemImmedCheck(kCondLs, rl_array.reg.GetReg(), len_offset,
+      GenMemImmedCheck(kCondLs, rl_array.low_reg, len_offset,
                        constant_index_value, kThrowConstantArrayBounds);
     } else {
-      GenRegMemCheck(kCondUge, rl_index.reg.GetReg(), rl_array.reg.GetReg(),
+      GenRegMemCheck(kCondUge, rl_index.low_reg, rl_array.low_reg,
                      len_offset, kThrowArrayBounds);
     }
   }
   rl_result = EvalLoc(rl_dest, reg_class, true);
   if ((size == kLong) || (size == kDouble)) {
-    LoadBaseIndexedDisp(rl_array.reg.GetReg(), rl_index.reg.GetReg(), scale, data_offset, rl_result.reg.GetReg(),
-                        rl_result.reg.GetHighReg(), size, INVALID_SREG);
+    LoadBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale, data_offset, rl_result.low_reg,
+                        rl_result.high_reg, size, INVALID_SREG);
     StoreValueWide(rl_dest, rl_result);
   } else {
-    LoadBaseIndexedDisp(rl_array.reg.GetReg(), rl_index.reg.GetReg(), scale,
-                        data_offset, rl_result.reg.GetReg(), INVALID_REG, size,
+    LoadBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale,
+                        data_offset, rl_result.low_reg, INVALID_REG, size,
                         INVALID_SREG);
     StoreValue(rl_dest, rl_result);
   }
@@ -1340,18 +1338,18 @@
     constant_index_value = mir_graph_->ConstantValue(rl_index);
     data_offset += constant_index_value << scale;
     // treat as non array below
-    rl_index.reg = RegStorage(RegStorage::k32BitSolo, INVALID_REG);
+    rl_index.low_reg = INVALID_REG;
   }
 
   /* null object? */
-  GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
+  GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
 
   if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
     if (constant_index) {
-      GenMemImmedCheck(kCondLs, rl_array.reg.GetReg(), len_offset,
+      GenMemImmedCheck(kCondLs, rl_array.low_reg, len_offset,
                        constant_index_value, kThrowConstantArrayBounds);
     } else {
-      GenRegMemCheck(kCondUge, rl_index.reg.GetReg(), rl_array.reg.GetReg(),
+      GenRegMemCheck(kCondUge, rl_index.low_reg, rl_array.low_reg,
                      len_offset, kThrowArrayBounds);
     }
   }
@@ -1361,21 +1359,21 @@
     rl_src = LoadValue(rl_src, reg_class);
   }
   // If the src reg can't be byte accessed, move it to a temp first.
-  if ((size == kSignedByte || size == kUnsignedByte) && rl_src.reg.GetReg() >= 4) {
+  if ((size == kSignedByte || size == kUnsignedByte) && rl_src.low_reg >= 4) {
     int temp = AllocTemp();
-    OpRegCopy(temp, rl_src.reg.GetReg());
-    StoreBaseIndexedDisp(rl_array.reg.GetReg(), rl_index.reg.GetReg(), scale, data_offset, temp,
+    OpRegCopy(temp, rl_src.low_reg);
+    StoreBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale, data_offset, temp,
                          INVALID_REG, size, INVALID_SREG);
   } else {
-    StoreBaseIndexedDisp(rl_array.reg.GetReg(), rl_index.reg.GetReg(), scale, data_offset, rl_src.reg.GetReg(),
-                         rl_src.wide ? rl_src.reg.GetHighReg() : INVALID_REG, size, INVALID_SREG);
+    StoreBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale, data_offset, rl_src.low_reg,
+                         rl_src.high_reg, size, INVALID_SREG);
   }
   if (card_mark) {
     // Free rl_index if its a temp. Ensures there are 2 free regs for card mark.
     if (!constant_index) {
-      FreeTemp(rl_index.reg.GetReg());
+      FreeTemp(rl_index.low_reg);
     }
-    MarkGCCard(rl_src.reg.GetReg(), rl_array.reg.GetReg());
+    MarkGCCard(rl_src.low_reg, rl_array.low_reg);
   }
 }
 
@@ -1387,52 +1385,52 @@
     case Instruction::SHL_LONG_2ADDR:
       DCHECK_NE(shift_amount, 1);  // Prevent a double store from happening.
       if (shift_amount == 32) {
-        OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetReg());
-        LoadConstant(rl_result.reg.GetReg(), 0);
+        OpRegCopy(rl_result.high_reg, rl_src.low_reg);
+        LoadConstant(rl_result.low_reg, 0);
       } else if (shift_amount > 31) {
-        OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetReg());
-        FreeTemp(rl_src.reg.GetHighReg());
-        NewLIR2(kX86Sal32RI, rl_result.reg.GetHighReg(), shift_amount - 32);
-        LoadConstant(rl_result.reg.GetReg(), 0);
+        OpRegCopy(rl_result.high_reg, rl_src.low_reg);
+        FreeTemp(rl_src.high_reg);
+        NewLIR2(kX86Sal32RI, rl_result.high_reg, shift_amount - 32);
+        LoadConstant(rl_result.low_reg, 0);
       } else {
-        OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
-        OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
-        NewLIR3(kX86Shld32RRI, rl_result.reg.GetHighReg(), rl_result.reg.GetReg(), shift_amount);
-        NewLIR2(kX86Sal32RI, rl_result.reg.GetReg(), shift_amount);
+        OpRegCopy(rl_result.low_reg, rl_src.low_reg);
+        OpRegCopy(rl_result.high_reg, rl_src.high_reg);
+        NewLIR3(kX86Shld32RRI, rl_result.high_reg, rl_result.low_reg, shift_amount);
+        NewLIR2(kX86Sal32RI, rl_result.low_reg, shift_amount);
       }
       break;
     case Instruction::SHR_LONG:
     case Instruction::SHR_LONG_2ADDR:
       if (shift_amount == 32) {
-        OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
-        OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
-        NewLIR2(kX86Sar32RI, rl_result.reg.GetHighReg(), 31);
+        OpRegCopy(rl_result.low_reg, rl_src.high_reg);
+        OpRegCopy(rl_result.high_reg, rl_src.high_reg);
+        NewLIR2(kX86Sar32RI, rl_result.high_reg, 31);
       } else if (shift_amount > 31) {
-        OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
-        OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
-        NewLIR2(kX86Sar32RI, rl_result.reg.GetReg(), shift_amount - 32);
-        NewLIR2(kX86Sar32RI, rl_result.reg.GetHighReg(), 31);
+        OpRegCopy(rl_result.low_reg, rl_src.high_reg);
+        OpRegCopy(rl_result.high_reg, rl_src.high_reg);
+        NewLIR2(kX86Sar32RI, rl_result.low_reg, shift_amount - 32);
+        NewLIR2(kX86Sar32RI, rl_result.high_reg, 31);
       } else {
-        OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
-        OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
-        NewLIR3(kX86Shrd32RRI, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), shift_amount);
-        NewLIR2(kX86Sar32RI, rl_result.reg.GetHighReg(), shift_amount);
+        OpRegCopy(rl_result.low_reg, rl_src.low_reg);
+        OpRegCopy(rl_result.high_reg, rl_src.high_reg);
+        NewLIR3(kX86Shrd32RRI, rl_result.low_reg, rl_result.high_reg, shift_amount);
+        NewLIR2(kX86Sar32RI, rl_result.high_reg, shift_amount);
       }
       break;
     case Instruction::USHR_LONG:
     case Instruction::USHR_LONG_2ADDR:
       if (shift_amount == 32) {
-        OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
-        LoadConstant(rl_result.reg.GetHighReg(), 0);
+        OpRegCopy(rl_result.low_reg, rl_src.high_reg);
+        LoadConstant(rl_result.high_reg, 0);
       } else if (shift_amount > 31) {
-        OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
-        NewLIR2(kX86Shr32RI, rl_result.reg.GetReg(), shift_amount - 32);
-        LoadConstant(rl_result.reg.GetHighReg(), 0);
+        OpRegCopy(rl_result.low_reg, rl_src.high_reg);
+        NewLIR2(kX86Shr32RI, rl_result.low_reg, shift_amount - 32);
+        LoadConstant(rl_result.high_reg, 0);
       } else {
-        OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
-        OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
-        NewLIR3(kX86Shrd32RRI, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), shift_amount);
-        NewLIR2(kX86Shr32RI, rl_result.reg.GetHighReg(), shift_amount);
+        OpRegCopy(rl_result.low_reg, rl_src.low_reg);
+        OpRegCopy(rl_result.high_reg, rl_src.high_reg);
+        NewLIR3(kX86Shrd32RRI, rl_result.low_reg, rl_result.high_reg, shift_amount);
+        NewLIR2(kX86Shr32RI, rl_result.high_reg, shift_amount);
       }
       break;
     default:
@@ -1569,7 +1567,7 @@
                                 int32_t value) {
   bool in_mem = loc.location != kLocPhysReg;
   bool byte_imm = IS_SIMM8(value);
-  DCHECK(in_mem || !IsFpReg(loc.reg.GetReg()));
+  DCHECK(in_mem || !IsFpReg(loc.low_reg));
   switch (op) {
     case Instruction::ADD_LONG:
     case Instruction::ADD_LONG_2ADDR:
@@ -1649,15 +1647,15 @@
 
   RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
   DCHECK_EQ(rl_result.location, kLocPhysReg);
-  DCHECK(!IsFpReg(rl_result.reg.GetReg()));
+  DCHECK(!IsFpReg(rl_result.low_reg));
 
   if (!IsNoOp(op, val_lo)) {
     X86OpCode x86op = GetOpcode(op, rl_result, false, val_lo);
-    NewLIR2(x86op, rl_result.reg.GetReg(), val_lo);
+    NewLIR2(x86op, rl_result.low_reg, val_lo);
   }
   if (!IsNoOp(op, val_hi)) {
     X86OpCode x86op = GetOpcode(op, rl_result, true, val_hi);
-    NewLIR2(x86op, rl_result.reg.GetHighReg(), val_hi);
+    NewLIR2(x86op, rl_result.high_reg, val_hi);
   }
   StoreValueWide(rl_dest, rl_result);
 }
@@ -1673,15 +1671,15 @@
 
   // Can we do this directly into the destination registers?
   if (rl_dest.location == kLocPhysReg && rl_src1.location == kLocPhysReg &&
-      rl_dest.reg.GetReg() == rl_src1.reg.GetReg() && rl_dest.reg.GetHighReg() == rl_src1.reg.GetHighReg() &&
-      !IsFpReg(rl_dest.reg.GetReg())) {
+      rl_dest.low_reg == rl_src1.low_reg && rl_dest.high_reg == rl_src1.high_reg &&
+      !IsFpReg(rl_dest.low_reg)) {
     if (!IsNoOp(op, val_lo)) {
       X86OpCode x86op = GetOpcode(op, rl_dest, false, val_lo);
-      NewLIR2(x86op, rl_dest.reg.GetReg(), val_lo);
+      NewLIR2(x86op, rl_dest.low_reg, val_lo);
     }
     if (!IsNoOp(op, val_hi)) {
       X86OpCode x86op = GetOpcode(op, rl_dest, true, val_hi);
-      NewLIR2(x86op, rl_dest.reg.GetHighReg(), val_hi);
+      NewLIR2(x86op, rl_dest.high_reg, val_hi);
     }
 
     StoreFinalValueWide(rl_dest, rl_dest);
@@ -1695,11 +1693,11 @@
   RegLocation rl_result = ForceTempWide(rl_src1);
   if (!IsNoOp(op, val_lo)) {
     X86OpCode x86op = GetOpcode(op, rl_result, false, val_lo);
-    NewLIR2(x86op, rl_result.reg.GetReg(), val_lo);
+    NewLIR2(x86op, rl_result.low_reg, val_lo);
   }
   if (!IsNoOp(op, val_hi)) {
     X86OpCode x86op = GetOpcode(op, rl_result, true, val_hi);
-    NewLIR2(x86op, rl_result.reg.GetHighReg(), val_hi);
+    NewLIR2(x86op, rl_result.high_reg, val_hi);
   }
 
   StoreFinalValueWide(rl_dest, rl_result);
@@ -1711,17 +1709,17 @@
                                     RegLocation rl_dest, RegLocation rl_src) {
   RegLocation object = LoadValue(rl_src, kCoreReg);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  int result_reg = rl_result.reg.GetReg();
+  int result_reg = rl_result.low_reg;
 
   // SETcc only works with EAX..EDX.
-  if (result_reg == object.reg.GetReg() || result_reg >= 4) {
+  if (result_reg == object.low_reg || result_reg >= 4) {
     result_reg = AllocTypedTemp(false, kCoreReg);
     DCHECK_LT(result_reg, 4);
   }
 
   // Assume that there is no match.
   LoadConstant(result_reg, 0);
-  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg.GetReg(), 0, NULL);
+  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.low_reg, 0, NULL);
 
   int check_class = AllocTypedTemp(false, kCoreReg);
 
@@ -1732,11 +1730,11 @@
 
   if (rl_method.location == kLocPhysReg) {
     if (use_declaring_class) {
-      LoadWordDisp(rl_method.reg.GetReg(),
+      LoadWordDisp(rl_method.low_reg,
                    mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                    check_class);
     } else {
-      LoadWordDisp(rl_method.reg.GetReg(),
+      LoadWordDisp(rl_method.low_reg,
                    mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                    check_class);
       LoadWordDisp(check_class, offset_of_type, check_class);
@@ -1757,7 +1755,7 @@
 
   // Compare the computed class to the class in the object.
   DCHECK_EQ(object.location, kLocPhysReg);
-  OpRegMem(kOpCmp, check_class, object.reg.GetReg(),
+  OpRegMem(kOpCmp, check_class, object.low_reg,
            mirror::Object::ClassOffset().Int32Value());
 
   // Set the low byte of the result to 0 or 1 from the compare condition code.
@@ -1767,7 +1765,7 @@
   null_branchover->target = target;
   FreeTemp(check_class);
   if (IsTemp(result_reg)) {
-    OpRegCopy(rl_result.reg.GetReg(), result_reg);
+    OpRegCopy(rl_result.low_reg, result_reg);
     FreeTemp(result_reg);
   }
   StoreValue(rl_dest, rl_result);
@@ -1820,7 +1818,7 @@
   RegLocation rl_result = GetReturn(false);
 
   // SETcc only works with EAX..EDX.
-  DCHECK_LT(rl_result.reg.GetReg(), 4);
+  DCHECK_LT(rl_result.low_reg, 4);
 
   // Is the class NULL?
   LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
@@ -1832,13 +1830,13 @@
   LIR* branchover = nullptr;
   if (type_known_final) {
     // Ensure top 3 bytes of result are 0.
-    LoadConstant(rl_result.reg.GetReg(), 0);
+    LoadConstant(rl_result.low_reg, 0);
     OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));
     // Set the low byte of the result to 0 or 1 from the compare condition code.
-    NewLIR2(kX86Set8R, rl_result.reg.GetReg(), kX86CondEq);
+    NewLIR2(kX86Set8R, rl_result.low_reg, kX86CondEq);
   } else {
     if (!type_known_abstract) {
-      LoadConstant(rl_result.reg.GetReg(), 1);     // Assume result succeeds.
+      LoadConstant(rl_result.low_reg, 1);     // Assume result succeeds.
       branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
     }
     OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
@@ -1966,7 +1964,7 @@
     rl_lhs = LoadValue(rl_lhs, kCoreReg);
     rl_result = UpdateLoc(rl_dest);
     rl_result = EvalLoc(rl_dest, kCoreReg, true);
-    OpRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg());
+    OpRegReg(op, rl_result.low_reg, rl_lhs.low_reg);
   } else {
     if (shift_op) {
       // X86 doesn't require masking and must use ECX.
@@ -1981,9 +1979,9 @@
           OpMemReg(op, rl_result, t_reg);
           FreeTemp(t_reg);
           return;
-        } else if (!IsFpReg(rl_result.reg.GetReg())) {
+        } else if (!IsFpReg(rl_result.low_reg)) {
           // Can do this directly into the result register
-          OpRegReg(op, rl_result.reg.GetReg(), t_reg);
+          OpRegReg(op, rl_result.low_reg, t_reg);
           FreeTemp(t_reg);
           StoreFinalValue(rl_dest, rl_result);
           return;
@@ -1992,7 +1990,7 @@
       // Three address form, or we can't do directly.
       rl_lhs = LoadValue(rl_lhs, kCoreReg);
       rl_result = EvalLoc(rl_dest, kCoreReg, true);
-      OpRegRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg(), t_reg);
+      OpRegRegReg(op, rl_result.low_reg, rl_lhs.low_reg, t_reg);
       FreeTemp(t_reg);
     } else {
       // Multiply is 3 operand only (sort of).
@@ -2003,11 +2001,11 @@
           // Can we do this from memory directly?
           rl_rhs = UpdateLoc(rl_rhs);
           if (rl_rhs.location != kLocPhysReg) {
-            OpRegMem(op, rl_result.reg.GetReg(), rl_rhs);
+            OpRegMem(op, rl_result.low_reg, rl_rhs);
             StoreFinalValue(rl_dest, rl_result);
             return;
-          } else if (!IsFpReg(rl_rhs.reg.GetReg())) {
-            OpRegReg(op, rl_result.reg.GetReg(), rl_rhs.reg.GetReg());
+          } else if (!IsFpReg(rl_rhs.low_reg)) {
+            OpRegReg(op, rl_result.low_reg, rl_rhs.low_reg);
             StoreFinalValue(rl_dest, rl_result);
             return;
           }
@@ -2015,17 +2013,17 @@
         rl_rhs = LoadValue(rl_rhs, kCoreReg);
         if (rl_result.location != kLocPhysReg) {
           // Okay, we can do this into memory.
-          OpMemReg(op, rl_result, rl_rhs.reg.GetReg());
+          OpMemReg(op, rl_result, rl_rhs.low_reg);
           return;
-        } else if (!IsFpReg(rl_result.reg.GetReg())) {
+        } else if (!IsFpReg(rl_result.low_reg)) {
           // Can do this directly into the result register.
-          OpRegReg(op, rl_result.reg.GetReg(), rl_rhs.reg.GetReg());
+          OpRegReg(op, rl_result.low_reg, rl_rhs.low_reg);
           StoreFinalValue(rl_dest, rl_result);
           return;
         } else {
           rl_lhs = LoadValue(rl_lhs, kCoreReg);
           rl_result = EvalLoc(rl_dest, kCoreReg, true);
-          OpRegRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg(), rl_rhs.reg.GetReg());
+          OpRegRegReg(op, rl_result.low_reg, rl_lhs.low_reg, rl_rhs.low_reg);
         }
       } else {
         // Try to use reg/memory instructions.
@@ -2037,34 +2035,34 @@
           rl_lhs = LoadValue(rl_lhs, kCoreReg);
           rl_rhs = LoadValue(rl_rhs, kCoreReg);
           rl_result = EvalLoc(rl_dest, kCoreReg, true);
-          OpRegRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg(), rl_rhs.reg.GetReg());
+          OpRegRegReg(op, rl_result.low_reg, rl_lhs.low_reg, rl_rhs.low_reg);
         } else {
           // We can optimize by moving to result and using memory operands.
           if (rl_rhs.location != kLocPhysReg) {
             // Force LHS into result.
             rl_result = EvalLoc(rl_dest, kCoreReg, true);
-            LoadValueDirect(rl_lhs, rl_result.reg.GetReg());
-            OpRegMem(op, rl_result.reg.GetReg(), rl_rhs);
+            LoadValueDirect(rl_lhs, rl_result.low_reg);
+            OpRegMem(op, rl_result.low_reg, rl_rhs);
           } else if (rl_lhs.location != kLocPhysReg) {
             // RHS is in a register; LHS is in memory.
             if (op != kOpSub) {
               // Force RHS into result and operate on memory.
               rl_result = EvalLoc(rl_dest, kCoreReg, true);
-              OpRegCopy(rl_result.reg.GetReg(), rl_rhs.reg.GetReg());
-              OpRegMem(op, rl_result.reg.GetReg(), rl_lhs);
+              OpRegCopy(rl_result.low_reg, rl_rhs.low_reg);
+              OpRegMem(op, rl_result.low_reg, rl_lhs);
             } else {
               // Subtraction isn't commutative.
               rl_lhs = LoadValue(rl_lhs, kCoreReg);
               rl_rhs = LoadValue(rl_rhs, kCoreReg);
               rl_result = EvalLoc(rl_dest, kCoreReg, true);
-              OpRegRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg(), rl_rhs.reg.GetReg());
+              OpRegRegReg(op, rl_result.low_reg, rl_lhs.low_reg, rl_rhs.low_reg);
             }
           } else {
             // Both are in registers.
             rl_lhs = LoadValue(rl_lhs, kCoreReg);
             rl_rhs = LoadValue(rl_rhs, kCoreReg);
             rl_result = EvalLoc(rl_dest, kCoreReg, true);
-            OpRegRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg(), rl_rhs.reg.GetReg());
+            OpRegRegReg(op, rl_result.low_reg, rl_lhs.low_reg, rl_rhs.low_reg);
           }
         }
       }
@@ -2075,10 +2073,10 @@
 
 bool X86Mir2Lir::IsOperationSafeWithoutTemps(RegLocation rl_lhs, RegLocation rl_rhs) {
   // If we have non-core registers, then we can't do good things.
-  if (rl_lhs.location == kLocPhysReg && IsFpReg(rl_lhs.reg.GetReg())) {
+  if (rl_lhs.location == kLocPhysReg && IsFpReg(rl_lhs.low_reg)) {
     return false;
   }
-  if (rl_rhs.location == kLocPhysReg && IsFpReg(rl_rhs.reg.GetReg())) {
+  if (rl_rhs.location == kLocPhysReg && IsFpReg(rl_rhs.low_reg)) {
     return false;
   }
 
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 0b8e1ee..eea7191 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -49,19 +49,23 @@
 };
 
 RegLocation X86Mir2Lir::LocCReturn() {
-  return x86_loc_c_return;
+  RegLocation res = X86_LOC_C_RETURN;
+  return res;
 }
 
 RegLocation X86Mir2Lir::LocCReturnWide() {
-  return x86_loc_c_return_wide;
+  RegLocation res = X86_LOC_C_RETURN_WIDE;
+  return res;
 }
 
 RegLocation X86Mir2Lir::LocCReturnFloat() {
-  return x86_loc_c_return_float;
+  RegLocation res = X86_LOC_C_RETURN_FLOAT;
+  return res;
 }
 
 RegLocation X86Mir2Lir::LocCReturnDouble() {
-  return x86_loc_c_return_double;
+  RegLocation res = X86_LOC_C_RETURN_DOUBLE;
+  return res;
 }
 
 // Return a target-dependent special register.
@@ -386,19 +390,19 @@
 
 RegLocation X86Mir2Lir::GetReturnWideAlt() {
   RegLocation res = LocCReturnWide();
-  CHECK(res.reg.GetReg() == rAX);
-  CHECK(res.reg.GetHighReg() == rDX);
+  CHECK(res.low_reg == rAX);
+  CHECK(res.high_reg == rDX);
   Clobber(rAX);
   Clobber(rDX);
   MarkInUse(rAX);
   MarkInUse(rDX);
-  MarkPair(res.reg.GetReg(), res.reg.GetHighReg());
+  MarkPair(res.low_reg, res.high_reg);
   return res;
 }
 
 RegLocation X86Mir2Lir::GetReturnAlt() {
   RegLocation res = LocCReturn();
-  res.reg.SetReg(rDX);
+  res.low_reg = rDX;
   Clobber(rDX);
   MarkInUse(rDX);
   return res;
@@ -426,21 +430,27 @@
   NewLIR0(kX86Mfence);
 #endif
 }
-
-// Alloc a pair of core registers, or a double.
-RegStorage X86Mir2Lir::AllocTypedTempWide(bool fp_hint, int reg_class) {
+/*
+ * Alloc a pair of core registers, or a double.  Low reg in low byte,
+ * high reg in next byte.
+ */
+int X86Mir2Lir::AllocTypedTempPair(bool fp_hint,
+                          int reg_class) {
   int high_reg;
   int low_reg;
+  int res = 0;
 
   if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
     low_reg = AllocTempDouble();
     high_reg = low_reg;  // only one allocated!
-    // TODO: take advantage of 64-bit notation.
-    return RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
+    res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
+    return res;
   }
+
   low_reg = AllocTemp();
   high_reg = AllocTemp();
-  return RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
+  res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
+  return res;
 }
 
 int X86Mir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
@@ -483,11 +493,11 @@
 
 void X86Mir2Lir::FreeRegLocTemps(RegLocation rl_keep,
                      RegLocation rl_free) {
-  if ((rl_free.reg.GetReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetReg() != rl_keep.reg.GetHighReg()) &&
-      (rl_free.reg.GetHighReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetHighReg() != rl_keep.reg.GetHighReg())) {
+  if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
+      (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) {
     // No overlap, free both
-    FreeTemp(rl_free.reg.GetReg());
-    FreeTemp(rl_free.reg.GetHighReg());
+    FreeTemp(rl_free.low_reg);
+    FreeTemp(rl_free.high_reg);
   }
 }
 
@@ -591,11 +601,11 @@
 
       if (match) {
         // We can reuse;update the register usage info.
+        loc.low_reg = info_lo->reg;
+        loc.high_reg = info_lo->reg;  // Play nice with existing code.
         loc.location = kLocPhysReg;
         loc.vec_len = kVectorLength8;
-        // TODO: use k64BitVector
-        loc.reg = RegStorage(RegStorage::k64BitPair, info_lo->reg, info_lo->reg);
-        DCHECK(IsFpReg(loc.reg.GetReg()));
+        DCHECK(IsFpReg(loc.low_reg));
         return loc;
       }
       // We can't easily reuse; clobber and free any overlaps.
@@ -625,10 +635,11 @@
       }
       if (match) {
         // Can reuse - update the register usage info
-        loc.reg = RegStorage(RegStorage::k64BitPair, info_lo->reg, info_hi->reg);
+        loc.low_reg = info_lo->reg;
+        loc.high_reg = info_hi->reg;
         loc.location = kLocPhysReg;
-        MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg());
-        DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0));
+        MarkPair(loc.low_reg, loc.high_reg);
+        DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
         return loc;
       }
       // Can't easily reuse - clobber and free any overlaps
@@ -652,6 +663,7 @@
 // TODO: Reunify with common code after 'pair mess' has been fixed
 RegLocation X86Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) {
   DCHECK(loc.wide);
+  int32_t new_regs;
   int32_t low_reg;
   int32_t high_reg;
 
@@ -659,37 +671,38 @@
 
   /* If it is already in a register, we can assume proper form.  Is it the right reg class? */
   if (loc.location == kLocPhysReg) {
-    DCHECK_EQ(IsFpReg(loc.reg.GetReg()), loc.IsVectorScalar());
-    if (!RegClassMatches(reg_class, loc.reg.GetReg())) {
+    DCHECK_EQ(IsFpReg(loc.low_reg), loc.IsVectorScalar());
+    if (!RegClassMatches(reg_class, loc.low_reg)) {
       /* It is the wrong register class.  Reallocate and copy. */
-      if (!IsFpReg(loc.reg.GetReg())) {
+      if (!IsFpReg(loc.low_reg)) {
         // We want this in a FP reg, and it is in core registers.
         DCHECK(reg_class != kCoreReg);
         // Allocate this into any FP reg, and mark it with the right size.
         low_reg = AllocTypedTemp(true, reg_class);
-        OpVectorRegCopyWide(low_reg, loc.reg.GetReg(), loc.reg.GetHighReg());
-        CopyRegInfo(low_reg, loc.reg.GetReg());
-        Clobber(loc.reg.GetReg());
-        Clobber(loc.reg.GetHighReg());
-        loc.reg.SetReg(low_reg);
-        loc.reg.SetHighReg(low_reg);  // Play nice with existing code.
+        OpVectorRegCopyWide(low_reg, loc.low_reg, loc.high_reg);
+        CopyRegInfo(low_reg, loc.low_reg);
+        Clobber(loc.low_reg);
+        Clobber(loc.high_reg);
+        loc.low_reg = low_reg;
+        loc.high_reg = low_reg;  // Play nice with existing code.
         loc.vec_len = kVectorLength8;
       } else {
         // The value is in a FP register, and we want it in a pair of core registers.
         DCHECK_EQ(reg_class, kCoreReg);
-        DCHECK_EQ(loc.reg.GetReg(), loc.reg.GetHighReg());
-        RegStorage new_regs = AllocTypedTempWide(false, kCoreReg);  // Force to core registers.
-        low_reg = new_regs.GetReg();
-        high_reg = new_regs.GetHighReg();
+        DCHECK_EQ(loc.low_reg, loc.high_reg);
+        new_regs = AllocTypedTempPair(false, kCoreReg);  // Force to core registers.
+        low_reg = new_regs & 0xff;
+        high_reg = (new_regs >> 8) & 0xff;
         DCHECK_NE(low_reg, high_reg);
-        OpRegCopyWide(low_reg, high_reg, loc.reg.GetReg(), loc.reg.GetHighReg());
-        CopyRegInfo(low_reg, loc.reg.GetReg());
-        CopyRegInfo(high_reg, loc.reg.GetHighReg());
-        Clobber(loc.reg.GetReg());
-        Clobber(loc.reg.GetHighReg());
-        loc.reg = new_regs;
-        MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg());
-        DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0));
+        OpRegCopyWide(low_reg, high_reg, loc.low_reg, loc.high_reg);
+        CopyRegInfo(low_reg, loc.low_reg);
+        CopyRegInfo(high_reg, loc.high_reg);
+        Clobber(loc.low_reg);
+        Clobber(loc.high_reg);
+        loc.low_reg = low_reg;
+        loc.high_reg = high_reg;
+        MarkPair(loc.low_reg, loc.high_reg);
+        DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
       }
     }
     return loc;
@@ -698,20 +711,21 @@
   DCHECK_NE(loc.s_reg_low, INVALID_SREG);
   DCHECK_NE(GetSRegHi(loc.s_reg_low), INVALID_SREG);
 
-  loc.reg = AllocTypedTempWide(loc.fp, reg_class);
+  new_regs = AllocTypedTempPair(loc.fp, reg_class);
+  loc.low_reg = new_regs & 0xff;
+  loc.high_reg = (new_regs >> 8) & 0xff;
 
-  // FIXME: take advantage of RegStorage notation.
-  if (loc.reg.GetReg() == loc.reg.GetHighReg()) {
-    DCHECK(IsFpReg(loc.reg.GetReg()));
+  if (loc.low_reg == loc.high_reg) {
+    DCHECK(IsFpReg(loc.low_reg));
     loc.vec_len = kVectorLength8;
   } else {
-    MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg());
+    MarkPair(loc.low_reg, loc.high_reg);
   }
   if (update) {
     loc.location = kLocPhysReg;
-    MarkLive(loc.reg.GetReg(), loc.s_reg_low);
-    if (loc.reg.GetReg() != loc.reg.GetHighReg()) {
-      MarkLive(loc.reg.GetHighReg(), GetSRegHi(loc.s_reg_low));
+    MarkLive(loc.low_reg, loc.s_reg_low);
+    if (loc.low_reg != loc.high_reg) {
+      MarkLive(loc.high_reg, GetSRegHi(loc.s_reg_low));
     }
   }
   return loc;
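
After allocation, EvalLocWide leaves the location in one of two shapes: an FP wide value occupies a single XMM register, recorded with low_reg == high_reg and an 8-byte vector length, while a core wide value occupies a genuine register pair that gets MarkPair'd. A hedged sketch of that invariant (WideLoc and VecLen are trimmed stand-ins for the real RegLocation fields):

#include <cassert>

enum VecLen { kVecNotUsed = 0, kVecLen8 = 8 };
struct WideLoc { int low_reg; int high_reg; bool is_fp; VecLen vec_len; };

// True if the location is in a shape EvalLocWide is expected to produce.
static bool WellFormed(const WideLoc& loc) {
  if (loc.low_reg == loc.high_reg) {
    // Single-register form: only legal for an FP value held in one XMM reg.
    return loc.is_fp && loc.vec_len == kVecLen8;
  }
  // Pair form: two distinct registers (the real code also calls MarkPair).
  return !loc.is_fp;
}

int main() {
  assert(WellFormed({17, 17, /*is_fp=*/true,  kVecLen8}));     // double in one XMM
  assert(WellFormed({3, 6,   /*is_fp=*/false, kVecNotUsed}));  // core pair
  assert(!WellFormed({3, 3,  /*is_fp=*/false, kVecNotUsed}));  // core value must not alias
  return 0;
}
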
@@ -727,14 +741,14 @@
   loc = UpdateLoc(loc);
 
   if (loc.location == kLocPhysReg) {
-    if (!RegClassMatches(reg_class, loc.reg.GetReg())) {
+    if (!RegClassMatches(reg_class, loc.low_reg)) {
       /* Wrong register class.  Realloc, copy and transfer ownership. */
       new_reg = AllocTypedTemp(loc.fp, reg_class);
-      OpRegCopy(new_reg, loc.reg.GetReg());
-      CopyRegInfo(new_reg, loc.reg.GetReg());
-      Clobber(loc.reg.GetReg());
-      loc.reg.SetReg(new_reg);
-      if (IsFpReg(loc.reg.GetReg()) && reg_class != kCoreReg)
+      OpRegCopy(new_reg, loc.low_reg);
+      CopyRegInfo(new_reg, loc.low_reg);
+      Clobber(loc.low_reg);
+      loc.low_reg = new_reg;
+      if (IsFpReg(loc.low_reg) && reg_class != kCoreReg)
         loc.vec_len = kVectorLength4;
     }
     return loc;
@@ -742,13 +756,14 @@
 
   DCHECK_NE(loc.s_reg_low, INVALID_SREG);
 
-  loc.reg = RegStorage(RegStorage::k32BitSolo, AllocTypedTemp(loc.fp, reg_class));
-  if (IsFpReg(loc.reg.GetReg()) && reg_class != kCoreReg)
+  new_reg = AllocTypedTemp(loc.fp, reg_class);
+  loc.low_reg = new_reg;
+  if (IsFpReg(loc.low_reg) && reg_class != kCoreReg)
     loc.vec_len = kVectorLength4;
 
   if (update) {
     loc.location = kLocPhysReg;
-    MarkLive(loc.reg.GetReg(), loc.s_reg_low);
+    MarkLive(loc.low_reg, loc.s_reg_low);
   }
   return loc;
 }
@@ -761,15 +776,15 @@
 // TODO: Reunify with common code after 'pair mess' has been fixed
 void X86Mir2Lir::ResetDefLocWide(RegLocation rl) {
   DCHECK(rl.wide);
-  RegisterInfo* p_low = IsTemp(rl.reg.GetReg());
-  if (IsFpReg(rl.reg.GetReg())) {
+  RegisterInfo* p_low = IsTemp(rl.low_reg);
+  if (IsFpReg(rl.low_reg)) {
     // We are using only the low register.
     if (p_low && !(cu_->disable_opt & (1 << kSuppressLoads))) {
       NullifyRange(p_low->def_start, p_low->def_end, p_low->s_reg, rl.s_reg_low);
     }
-    ResetDef(rl.reg.GetReg());
+    ResetDef(rl.low_reg);
   } else {
-    RegisterInfo* p_high = IsTemp(rl.reg.GetHighReg());
+    RegisterInfo* p_high = IsTemp(rl.high_reg);
     if (p_low && !(cu_->disable_opt & (1 << kSuppressLoads))) {
       DCHECK(p_low->pair);
       NullifyRange(p_low->def_start, p_low->def_end, p_low->s_reg, rl.s_reg_low);
@@ -777,8 +792,8 @@
     if (p_high && !(cu_->disable_opt & (1 << kSuppressLoads))) {
       DCHECK(p_high->pair);
     }
-    ResetDef(rl.reg.GetReg());
-    ResetDef(rl.reg.GetHighReg());
+    ResetDef(rl.low_reg);
+    ResetDef(rl.high_reg);
   }
 }
 
@@ -817,8 +832,8 @@
              << (loc.high_word ? " h" : "  ")
              << (loc.home ? " H" : "  ")
              << " vec_len: " << loc.vec_len
-             << ", low: " << static_cast<int>(loc.reg.GetReg())
-             << ", high: " << static_cast<int>(loc.reg.GetHighReg())
+             << ", low: " << static_cast<int>(loc.low_reg)
+             << ", high: " << static_cast<int>(loc.high_reg)
              << ", s_reg: " << loc.s_reg_low
              << ", orig: " << loc.orig_sreg;
 }
@@ -1021,8 +1036,8 @@
       // Runtime start index.
       rl_start = UpdateLoc(rl_start);
       if (rl_start.location == kLocPhysReg) {
-        length_compare = OpCmpBranch(kCondLe, rCX, rl_start.reg.GetReg(), nullptr);
-        OpRegReg(kOpSub, rCX, rl_start.reg.GetReg());
+        length_compare = OpCmpBranch(kCondLe, rCX, rl_start.low_reg, nullptr);
+        OpRegReg(kOpSub, rCX, rl_start.low_reg);
       } else {
         // Compare to memory to avoid a register load.  Handle pushed EDI.
         int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
@@ -1051,13 +1066,13 @@
     }
   } else {
     if (rl_start.location == kLocPhysReg) {
-      if (rl_start.reg.GetReg() == rDI) {
+      if (rl_start.low_reg == rDI) {
         // We have a slight problem here.  We are already using RDI!
         // Grab the value from the stack.
         LoadWordDisp(rX86_SP, 0, rDX);
         OpLea(rDI, rBX, rDX, 1, 0);
       } else {
-        OpLea(rDI, rBX, rl_start.reg.GetReg(), 1, 0);
+        OpLea(rDI, rBX, rl_start.low_reg, 1, 0);
       }
     } else {
       OpRegCopy(rDI, rBX);
@@ -1079,14 +1094,14 @@
   // index = ((curr_ptr - orig_ptr) / 2) - 1.
   OpRegReg(kOpSub, rDI, rBX);
   OpRegImm(kOpAsr, rDI, 1);
-  NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rDI, -1);
+  NewLIR3(kX86Lea32RM, rl_return.low_reg, rDI, -1);
   LIR *all_done = NewLIR1(kX86Jmp8, 0);
 
   // Failed to match; return -1.
   LIR *not_found = NewLIR0(kPseudoTargetLabel);
   length_compare->target = not_found;
   failed_branch->target = not_found;
-  LoadConstantNoClobber(rl_return.reg.GetReg(), -1);
+  LoadConstantNoClobber(rl_return.low_reg, -1);
 
   // And join up at the end.
   all_done->target = NewLIR0(kPseudoTargetLabel);
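
The result computation above implements index = ((curr_ptr - orig_ptr) / 2) - 1: the pointer difference is in bytes over 16-bit chars, and the scan (presumably a repne scasw style loop) leaves the cursor one char past the match, hence the shift by one and the -1 folded into the LEA. A worked sketch of the same arithmetic (the character data and scan loop are illustrative):

#include <cassert>
#include <cstdint>

int main() {
  const uint16_t chars[] = {'h', 'e', 'l', 'l', 'o'};
  const uint16_t* orig = chars;
  const uint16_t* curr = chars;
  const uint16_t target = 'l';

  // Scan and stop with curr one element past the match, as scasw would.
  while (*curr++ != target) {}

  // Byte difference / 2 gives a char count; subtract 1 to undo the
  // post-increment past the match.  'l' first appears at index 2.
  intptr_t byte_diff = reinterpret_cast<intptr_t>(curr) -
                       reinterpret_cast<intptr_t>(orig);
  intptr_t index = (byte_diff >> 1) - 1;
  assert(index == 2);
  return 0;
}
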
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index d5d6b0e..48a39bb 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -514,7 +514,7 @@
         // We don't know the proper offset for the value, so pick one that will force
         // 4 byte offset.  We will fix this up in the assembler later to have the right
         // value.
-        res = LoadBaseDisp(rl_method.reg.GetReg(), 256 /* bogus */, r_dest_lo, kDouble, INVALID_SREG);
+        res = LoadBaseDisp(rl_method.low_reg, 256 /* bogus */, r_dest_lo, kDouble, INVALID_SREG);
         res->target = data_target;
         res->flags.fixup = kFixupLoad;
         SetMemRefType(res, true, kLiteral);
@@ -714,7 +714,7 @@
       opcode = is_array ? kX86Mov8AR : kX86Mov8MR;
       break;
     default:
-      LOG(FATAL) << "Bad case in StoreBaseIndexedDispBody";
+      LOG(FATAL) << "Bad case in LoadBaseIndexedDispBody";
   }
 
   if (!is_array) {
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index 1df9ab1..4064bd6 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -126,6 +126,13 @@
 /* Mask to strip off fp flags */
 #define X86_FP_REG_MASK 0xF
 
+// RegisterLocation templates return values (rAX, rAX/rDX or XMM0).
+//                               location,     wide, defined, const, fp, core, ref, high_word, home, vec_len, low_reg, high_reg,     s_reg_low, orig_sreg
+#define X86_LOC_C_RETURN             {kLocPhysReg, 0,    0,       0,     0,  0,    0,   0,        1,    kVectorNotUsed, rAX,    INVALID_REG, INVALID_SREG, INVALID_SREG}
+#define X86_LOC_C_RETURN_WIDE        {kLocPhysReg, 1,    0,       0,     0,  0,    0,   0,        1,    kVectorNotUsed, rAX,    rDX,         INVALID_SREG, INVALID_SREG}
+#define X86_LOC_C_RETURN_FLOAT       {kLocPhysReg, 0,    0,       0,     1,  0,    0,   0,        1,    kVectorLength4, fr0,    INVALID_REG, INVALID_SREG, INVALID_SREG}
+#define X86_LOC_C_RETURN_DOUBLE      {kLocPhysReg, 1,    0,       0,     1,  0,    0,   0,        1,    kVectorLength8, fr0,    fr0,         INVALID_SREG, INVALID_SREG}
+
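
These macros expand to brace initializers for RegLocation, so the target code can define its canonical return locations in place: the wide template names rAX as low_reg and rDX as high_reg (which is what the CHECKs in the wide-return helper earlier in this patch rely on), and the double template names fr0 twice because a double sits in one XMM register. A hedged sketch with a trimmed-down struct and illustrative register numbers standing in for the real definitions:

#include <cassert>

// Trimmed stand-in for RegLocation: only the fields this example inspects.
struct MiniRegLocation { bool wide; bool fp; int low_reg; int high_reg; };

// Illustrative register numbers and templates mirroring the idea of the
// X86_LOC_C_RETURN* macros above (not the real definitions).
constexpr int kAX = 0, kDX = 2, kFr0 = 16, kInvalidReg = -1;
#define MINI_LOC_C_RETURN        {false, false, kAX,  kInvalidReg}
#define MINI_LOC_C_RETURN_WIDE   {true,  false, kAX,  kDX}
#define MINI_LOC_C_RETURN_DOUBLE {true,  true,  kFr0, kFr0}

int main() {
  MiniRegLocation scalar = MINI_LOC_C_RETURN;
  assert(!scalar.wide && scalar.low_reg == kAX);

  MiniRegLocation wide = MINI_LOC_C_RETURN_WIDE;
  assert(wide.wide && wide.low_reg == kAX && wide.high_reg == kDX);

  // A double comes back in a single XMM register, so both halves name fr0.
  MiniRegLocation dbl = MINI_LOC_C_RETURN_DOUBLE;
  assert(dbl.fp && dbl.low_reg == dbl.high_reg);
  return 0;
}
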
 enum X86ResourceEncodingPos {
   kX86GPReg0   = 0,
   kX86RegSP    = 4,
@@ -204,22 +211,6 @@
 #define rX86_COUNT rCX
 #define rX86_PC INVALID_REG
 
-// RegisterLocation templates return values (r_V0, or r_V0/r_V1).
-const RegLocation x86_loc_c_return
-    {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
-     RegStorage(RegStorage::k32BitSolo, rAX), INVALID_SREG, INVALID_SREG};
-const RegLocation x86_loc_c_return_wide
-    {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
-     RegStorage(RegStorage::k64BitPair, rAX, rDX), INVALID_SREG, INVALID_SREG};
-// TODO: update to use k32BitVector (must encode in 7 bits, including fp flag).
-const RegLocation x86_loc_c_return_float
-    {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1, kVectorLength4,
-     RegStorage(RegStorage::k32BitSolo, fr0), INVALID_SREG, INVALID_SREG};
-// TODO: update to use k64BitVector (must encode in 7 bits, including fp flag).
-const RegLocation x86_loc_c_return_double
-    {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1, kVectorLength8,
-     RegStorage(RegStorage::k64BitPair, fr0, fr1), INVALID_SREG, INVALID_SREG};
-
 /*
  * The following enum defines the list of supported X86 instructions by the
  * assembler. Their corresponding EncodingMap positions will be defined in