Rewrite use/def masks to support 128 bits.

Reduce LIR memory usage by holding the use/def masks in the LIR
by pointer rather than by value, using pre-defined const masks
for the common cases and allocating only a few on the arena.

Change-Id: I0f6d27ef6867acd157184c8c74f9612cebfe6c16
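
For reference, a minimal sketch of the 128-bit ResourceMask this patch switches
to. The real class lives in the shared Mir2Lir code and is not part of this
diff, so the representation and bit values below are assumptions; the
operations (Bit, SetBit, HasBit, Equals) and the pre-defined kEncodeNone /
kEncodeAll constants mirror how the x86 code below uses them.

#include <cstddef>
#include <cstdint>

class ResourceMask {
 public:
  // Abstract resources beyond the native register bits.
  // Exact positions are assumptions for this sketch.
  enum ResourceBit : size_t {
    kCCode = 64,
    kDalvikReg,
    kLiteral,
    kHeapRef,
    kMustNotAlias,
  };

  constexpr ResourceMask() : masks_{0u, 0u} {}
  constexpr ResourceMask(uint64_t lo, uint64_t hi) : masks_{lo, hi} {}

  // A mask with exactly one bit set, e.g. ResourceMask::Bit(kX86FPReg0 + reg_num).
  static constexpr ResourceMask Bit(size_t bit) {
    return ResourceMask(bit < 64u ? UINT64_C(1) << bit : 0u,
                        bit >= 64u ? UINT64_C(1) << (bit - 64u) : 0u);
  }

  void SetBit(size_t bit) { masks_[bit / 64u] |= UINT64_C(1) << (bit % 64u); }
  bool HasBit(size_t bit) const { return (masks_[bit / 64u] >> (bit % 64u)) & 1u; }
  bool Equals(const ResourceMask& other) const {
    return masks_[0] == other.masks_[0] && masks_[1] == other.masks_[1];
  }

 private:
  uint64_t masks_[2];  // 128 bits total.
};

// Pre-defined const masks for the common cases; most LIRs can point at these
// instead of allocating a mask on the arena.
constexpr ResourceMask kEncodeNone = ResourceMask();
constexpr ResourceMask kEncodeAll = ResourceMask(~UINT64_C(0), ~UINT64_C(0));

With the LIR holding const ResourceMask pointers (u.m.use_mask / u.m.def_mask),
the common cases simply point at kEncodeNone or kEncodeAll, and only unusual
masks need an arena allocation.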
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index 0a8193a..d37ee67 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -1541,7 +1541,9 @@
                   << " delta: " << delta << " old delta: " << lir->operands[0];
             }
             lir->opcode = kX86Jcc32;
-            SetupResourceMasks(lir);
+            lir->flags.size = GetInsnSize(lir);
+            DCHECK(lir->u.m.def_mask->Equals(kEncodeAll));
+            DCHECK(lir->u.m.use_mask->Equals(kEncodeAll));
             res = kRetryAll;
           }
           if (kVerbosePcFixup) {
@@ -1605,7 +1607,9 @@
               LOG(INFO) << "Retry for JMP growth at " << lir->offset;
             }
             lir->opcode = kX86Jmp32;
-            SetupResourceMasks(lir);
+            lir->flags.size = GetInsnSize(lir);
+            DCHECK(lir->u.m.def_mask->Equals(kEncodeAll));
+            DCHECK(lir->u.m.use_mask->Equals(kEncodeAll));
             res = kRetryAll;
           }
           lir->operands[0] = delta;
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 61c9f4f..6ae553d 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -99,7 +99,7 @@
   RegLocation LocCReturnDouble();
   RegLocation LocCReturnFloat();
   RegLocation LocCReturnWide();
-  uint64_t GetRegMaskCommon(RegStorage reg);
+  ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
   void AdjustSpillMask();
   void ClobberCallerSave();
   void FreeCallTemps();
@@ -113,12 +113,13 @@
   int AssignInsnOffsets();
   void AssignOffsets();
   AssemblerStatus AssembleInstructions(CodeOffset start_addr);
-  void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
-  void SetupTargetResourceMasks(LIR* lir, uint64_t flags);
+  void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
+  void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
+                                ResourceMask* use_mask, ResourceMask* def_mask) OVERRIDE;
   const char* GetTargetInstFmt(int opcode);
   const char* GetTargetInstName(int opcode);
   std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
-  uint64_t GetPCUseDefEncoding();
+  ResourceMask GetPCUseDefEncoding() const OVERRIDE;
   uint64_t GetTargetInstFlags(int opcode);
   int GetInsnSize(LIR* lir);
   bool IsUnconditionalBranch(LIR* lir);
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index c3580f7..ced6400 100644
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -147,6 +147,9 @@
   // Update the in-register state of source.
   rl_src = UpdateLocWide(rl_src);
 
+  // All memory accesses below reference dalvik regs.
+  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+
   // If the source is in physical register, then put it in its location on stack.
   if (rl_src.location == kLocPhysReg) {
     RegisterInfo* reg_info = GetRegInfo(rl_src.reg);
@@ -191,15 +194,12 @@
      * right class. So we call EvalLoc(Wide) first which will ensure that it will get moved to the
      * correct register class.
      */
+    rl_result = EvalLoc(rl_dest, kFPReg, true);
     if (is_double) {
-      rl_result = EvalLocWide(rl_dest, kFPReg, true);
-
       LoadBaseDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg, k64);
 
       StoreFinalValueWide(rl_dest, rl_result);
     } else {
-      rl_result = EvalLoc(rl_dest, kFPReg, true);
-
       Load32Disp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg);
 
       StoreFinalValue(rl_dest, rl_result);
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index a050a05..4a77df2 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -794,34 +794,61 @@
     RegStorage r_tmp2 = RegStorage::MakeRegPair(rs_rBX, rs_rCX);
     LoadValueDirectWideFixed(rl_src_expected, r_tmp1);
     LoadValueDirectWideFixed(rl_src_new_value, r_tmp2);
-    NewLIR1(kX86Push32R, rs_rDI.GetReg());
-    MarkTemp(rs_rDI);
-    LockTemp(rs_rDI);
-    NewLIR1(kX86Push32R, rs_rSI.GetReg());
-    MarkTemp(rs_rSI);
-    LockTemp(rs_rSI);
-    const int push_offset = 4 /* push edi */ + 4 /* push esi */;
-    int srcObjSp = IsInReg(this, rl_src_obj, rs_rSI) ? 0
-                : (IsInReg(this, rl_src_obj, rs_rDI) ? 4
-                : (SRegOffset(rl_src_obj.s_reg_low) + push_offset));
     // FIXME: needs 64-bit update.
-    LoadWordDisp(TargetReg(kSp), srcObjSp, rs_rDI);
-    int srcOffsetSp = IsInReg(this, rl_src_offset, rs_rSI) ? 0
-                   : (IsInReg(this, rl_src_offset, rs_rDI) ? 4
-                   : (SRegOffset(rl_src_offset.s_reg_low) + push_offset));
-    LoadWordDisp(TargetReg(kSp), srcOffsetSp, rs_rSI);
-    NewLIR4(kX86LockCmpxchg64A, rs_rDI.GetReg(), rs_rSI.GetReg(), 0, 0);
+    const bool obj_in_di = IsInReg(this, rl_src_obj, rs_rDI);
+    const bool obj_in_si = IsInReg(this, rl_src_obj, rs_rSI);
+    DCHECK(!obj_in_si || !obj_in_di);
+    const bool off_in_di = IsInReg(this, rl_src_offset, rs_rDI);
+    const bool off_in_si = IsInReg(this, rl_src_offset, rs_rSI);
+    DCHECK(!off_in_si || !off_in_di);
+    // If obj/offset is already in rDI/rSI, use that reg; otherwise use the free one of the two.
+    RegStorage rs_obj = obj_in_di ? rs_rDI : obj_in_si ? rs_rSI : !off_in_di ? rs_rDI : rs_rSI;
+    RegStorage rs_off = off_in_si ? rs_rSI : off_in_di ? rs_rDI : !obj_in_si ? rs_rSI : rs_rDI;
+    bool push_di = (!obj_in_di && !off_in_di) && (rs_obj == rs_rDI || rs_off == rs_rDI);
+    bool push_si = (!obj_in_si && !off_in_si) && (rs_obj == rs_rSI || rs_off == rs_rSI);
+    if (push_di) {
+      NewLIR1(kX86Push32R, rs_rDI.GetReg());
+      MarkTemp(rs_rDI);
+      LockTemp(rs_rDI);
+    }
+    if (push_si) {
+      NewLIR1(kX86Push32R, rs_rSI.GetReg());
+      MarkTemp(rs_rSI);
+      LockTemp(rs_rSI);
+    }
+    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+    const size_t push_offset = (push_si ? 4u : 0u) + (push_di ? 4u : 0u);
+    if (!obj_in_si && !obj_in_di) {
+      LoadWordDisp(TargetReg(kSp), SRegOffset(rl_src_obj.s_reg_low) + push_offset, rs_obj);
+      // Dalvik register annotation in LoadBaseIndexedDisp() used the wrong offset. Fix it.
+      DCHECK(!DECODE_ALIAS_INFO_WIDE(last_lir_insn_->flags.alias_info));
+      int reg_id = DECODE_ALIAS_INFO_REG(last_lir_insn_->flags.alias_info) - push_offset / 4u;
+      AnnotateDalvikRegAccess(last_lir_insn_, reg_id, true, false);
+    }
+    if (!off_in_si && !off_in_di) {
+      LoadWordDisp(TargetReg(kSp), SRegOffset(rl_src_offset.s_reg_low) + push_offset, rs_off);
+      // Dalvik register annotation in LoadBaseIndexedDisp() used the wrong offset. Fix it.
+      DCHECK(!DECODE_ALIAS_INFO_WIDE(last_lir_insn_->flags.alias_info));
+      int reg_id = DECODE_ALIAS_INFO_REG(last_lir_insn_->flags.alias_info) - push_offset / 4u;
+      AnnotateDalvikRegAccess(last_lir_insn_, reg_id, true, false);
+    }
+    NewLIR4(kX86LockCmpxchg64A, rs_obj.GetReg(), rs_off.GetReg(), 0, 0);
 
     // After a store we need to insert barrier in case of potential load. Since the
     // locked cmpxchg has full barrier semantics, only a scheduling barrier will be generated.
     GenMemBarrier(kStoreLoad);
 
-    FreeTemp(rs_rSI);
-    UnmarkTemp(rs_rSI);
-    NewLIR1(kX86Pop32R, rs_rSI.GetReg());
-    FreeTemp(rs_rDI);
-    UnmarkTemp(rs_rDI);
-    NewLIR1(kX86Pop32R, rs_rDI.GetReg());
+
+    if (push_si) {
+      FreeTemp(rs_rSI);
+      UnmarkTemp(rs_rSI);
+      NewLIR1(kX86Pop32R, rs_rSI.GetReg());
+    }
+    if (push_di) {
+      FreeTemp(rs_rDI);
+      UnmarkTemp(rs_rDI);
+      NewLIR1(kX86Pop32R, rs_rDI.GetReg());
+    }
     FreeCallTemps();
   } else {
     // EAX must hold expected for CMPXCHG. Neither rl_new_value, nor r_ptr may be in EAX.
@@ -885,11 +912,11 @@
   // We don't know the proper offset for the value, so pick one that will force
   // 4 byte offset.  We will fix this up in the assembler later to have the right
   // value.
+  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
   LIR *res = RawLIR(current_dalvik_offset_, kX86Mov32RM, reg.GetReg(), reg.GetReg(), 256,
                     0, 0, target);
   res->target = target;
   res->flags.fixup = kFixupLoad;
-  SetMemRefType(res, true, kLiteral);
   store_method_addr_used_ = true;
   return res;
 }
@@ -1077,6 +1104,9 @@
 }
 
 void X86Mir2Lir::GenImulMemImm(RegStorage dest, int sreg, int displacement, int val) {
+  // All memory accesses below reference dalvik regs.
+  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+
   LIR *m;
   switch (val) {
     case 0:
@@ -1095,6 +1125,9 @@
 
 void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2) {
+  // All memory accesses below reference dalvik regs.
+  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+
   if (Gen64Bit()) {
     if (rl_src1.is_const) {
       std::swap(rl_src1, rl_src2);
@@ -1346,6 +1379,7 @@
   int r_base = TargetReg(kSp).GetReg();
   int displacement = SRegOffset(rl_src.s_reg_low);
 
+  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
   LIR *lir = NewLIR3(x86op, Gen64Bit() ? rl_dest.reg.GetReg() : rl_dest.reg.GetLowReg(), r_base, displacement + LOWORD_OFFSET);
   AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
                           true /* is_load */, true /* is64bit */);
@@ -1379,6 +1413,7 @@
   int r_base = TargetReg(kSp).GetReg();
   int displacement = SRegOffset(rl_dest.s_reg_low);
 
+  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
   LIR *lir = NewLIR3(x86op, r_base, displacement + LOWORD_OFFSET,
                      Gen64Bit() ? rl_src.reg.GetReg() : rl_src.reg.GetLowReg());
   AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
@@ -2061,6 +2096,7 @@
       int r_base = TargetReg(kSp).GetReg();
       int displacement = SRegOffset(rl_dest.s_reg_low);
 
+      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
       X86OpCode x86op = GetOpcode(op, rl_dest, false, val);
       LIR *lir = NewLIR3(x86op, r_base, displacement + LOWORD_OFFSET, val);
       AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
@@ -2091,6 +2127,7 @@
     int r_base = TargetReg(kSp).GetReg();
     int displacement = SRegOffset(rl_dest.s_reg_low);
 
+    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
     if (!IsNoOp(op, val_lo)) {
       X86OpCode x86op = GetOpcode(op, rl_dest, false, val_lo);
       LIR *lir = NewLIR3(x86op, r_base, displacement + LOWORD_OFFSET, val_lo);
@@ -2469,6 +2506,9 @@
     return;
   }
 
+  // If we generate any memory access below, it will reference a dalvik reg.
+  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+
   if (unary) {
     rl_lhs = LoadValue(rl_lhs, kCoreReg);
     rl_result = UpdateLocTyped(rl_dest, kCoreReg);
@@ -2620,6 +2660,7 @@
     NewLIR2(kX86MovsxdRR, rl_result.reg.GetReg(), rl_src.reg.GetReg());
   } else {
     int displacement = SRegOffset(rl_src.s_reg_low);
+    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
     LIR *m = NewLIR3(kX86MovsxdRM, rl_result.reg.GetReg(), rs_rX86_SP.GetReg(),
                      displacement + LOWORD_OFFSET);
     AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
@@ -2670,6 +2711,7 @@
     rl_result = UpdateLocWideTyped(rl_dest, kCoreReg);
     if (rl_result.location != kLocPhysReg) {
       // Okay, we can do this into memory
+      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
       OpMemReg(op, rl_result, t_reg.GetReg());
     } else if (!rl_result.reg.IsFloat()) {
       // Can do this directly into the result register
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index ec165af..d1ba239 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -206,77 +206,70 @@
 /*
  * Decode the register id.
  */
-uint64_t X86Mir2Lir::GetRegMaskCommon(RegStorage reg) {
-  uint64_t seed;
-  int shift;
-  int reg_id;
-
-  reg_id = reg.GetRegNum();
-  /* Double registers in x86 are just a single FP register */
-  seed = 1;
-  /* FP register starts at bit position 16 */
-  shift = (reg.IsFloat() || reg.StorageSize() > 8) ? kX86FPReg0 : 0;
-  /* Expand the double register id into single offset */
-  shift += reg_id;
-  return (seed << shift);
+ResourceMask X86Mir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
+  /* Double registers in x86 are just a single FP register. This is always just a single bit. */
+  return ResourceMask::Bit(
+      /* FP register starts at bit position 16 */
+      ((reg.IsFloat() || reg.StorageSize() > 8) ? kX86FPReg0 : 0) + reg.GetRegNum());
 }
 
-uint64_t X86Mir2Lir::GetPCUseDefEncoding() {
+ResourceMask X86Mir2Lir::GetPCUseDefEncoding() const {
   /*
    * FIXME: might make sense to use a virtual resource encoding bit for pc.  Might be
    * able to clean up some of the x86/Arm_Mips differences
    */
   LOG(FATAL) << "Unexpected call to GetPCUseDefEncoding for x86";
-  return 0ULL;
+  return kEncodeNone;
 }
 
-void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags) {
+void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags,
+                                          ResourceMask* use_mask, ResourceMask* def_mask) {
   DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
   DCHECK(!lir->flags.use_def_invalid);
 
   // X86-specific resource map setup here.
   if (flags & REG_USE_SP) {
-    lir->u.m.use_mask |= ENCODE_X86_REG_SP;
+    use_mask->SetBit(kX86RegSP);
   }
 
   if (flags & REG_DEF_SP) {
-    lir->u.m.def_mask |= ENCODE_X86_REG_SP;
+    def_mask->SetBit(kX86RegSP);
   }
 
   if (flags & REG_DEFA) {
-    SetupRegMask(&lir->u.m.def_mask, rs_rAX.GetReg());
+    SetupRegMask(def_mask, rs_rAX.GetReg());
   }
 
   if (flags & REG_DEFD) {
-    SetupRegMask(&lir->u.m.def_mask, rs_rDX.GetReg());
+    SetupRegMask(def_mask, rs_rDX.GetReg());
   }
   if (flags & REG_USEA) {
-    SetupRegMask(&lir->u.m.use_mask, rs_rAX.GetReg());
+    SetupRegMask(use_mask, rs_rAX.GetReg());
   }
 
   if (flags & REG_USEC) {
-    SetupRegMask(&lir->u.m.use_mask, rs_rCX.GetReg());
+    SetupRegMask(use_mask, rs_rCX.GetReg());
   }
 
   if (flags & REG_USED) {
-    SetupRegMask(&lir->u.m.use_mask, rs_rDX.GetReg());
+    SetupRegMask(use_mask, rs_rDX.GetReg());
   }
 
   if (flags & REG_USEB) {
-    SetupRegMask(&lir->u.m.use_mask, rs_rBX.GetReg());
+    SetupRegMask(use_mask, rs_rBX.GetReg());
   }
 
   // Fixup hard to describe instruction: Uses rAX, rCX, rDI; sets rDI.
   if (lir->opcode == kX86RepneScasw) {
-    SetupRegMask(&lir->u.m.use_mask, rs_rAX.GetReg());
-    SetupRegMask(&lir->u.m.use_mask, rs_rCX.GetReg());
-    SetupRegMask(&lir->u.m.use_mask, rs_rDI.GetReg());
-    SetupRegMask(&lir->u.m.def_mask, rs_rDI.GetReg());
+    SetupRegMask(use_mask, rs_rAX.GetReg());
+    SetupRegMask(use_mask, rs_rCX.GetReg());
+    SetupRegMask(use_mask, rs_rDI.GetReg());
+    SetupRegMask(def_mask, rs_rDI.GetReg());
   }
 
   if (flags & USE_FP_STACK) {
-    lir->u.m.use_mask |= ENCODE_X86_FP_STACK;
-    lir->u.m.def_mask |= ENCODE_X86_FP_STACK;
+    use_mask->SetBit(kX86FPStack);
+    def_mask->SetBit(kX86FPStack);
   }
 }
 
@@ -368,40 +361,40 @@
   return buf;
 }
 
-void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, uint64_t mask, const char *prefix) {
+void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, const ResourceMask& mask, const char *prefix) {
   char buf[256];
   buf[0] = 0;
 
-  if (mask == ENCODE_ALL) {
+  if (mask.Equals(kEncodeAll)) {
     strcpy(buf, "all");
   } else {
     char num[8];
     int i;
 
     for (i = 0; i < kX86RegEnd; i++) {
-      if (mask & (1ULL << i)) {
+      if (mask.HasBit(i)) {
         snprintf(num, arraysize(num), "%d ", i);
         strcat(buf, num);
       }
     }
 
-    if (mask & ENCODE_CCODE) {
+    if (mask.HasBit(ResourceMask::kCCode)) {
       strcat(buf, "cc ");
     }
     /* Memory bits */
-    if (x86LIR && (mask & ENCODE_DALVIK_REG)) {
+    if (x86LIR && (mask.HasBit(ResourceMask::kDalvikReg))) {
       snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
                DECODE_ALIAS_INFO_REG(x86LIR->flags.alias_info),
                (DECODE_ALIAS_INFO_WIDE(x86LIR->flags.alias_info)) ? "(+1)" : "");
     }
-    if (mask & ENCODE_LITERAL) {
+    if (mask.HasBit(ResourceMask::kLiteral)) {
       strcat(buf, "lit ");
     }
 
-    if (mask & ENCODE_HEAP_REF) {
+    if (mask.HasBit(ResourceMask::kHeapRef)) {
       strcat(buf, "heap ");
     }
-    if (mask & ENCODE_MUST_NOT_ALIAS) {
+    if (mask.HasBit(ResourceMask::kMustNotAlias)) {
       strcat(buf, "noalias ");
     }
   }
@@ -551,7 +544,7 @@
   } else {
     // Mark as a scheduling barrier.
     DCHECK(!mem_barrier->flags.use_def_invalid);
-    mem_barrier->u.m.def_mask = ENCODE_ALL;
+    mem_barrier->u.m.def_mask = &kEncodeAll;
   }
   return ret;
 #else
@@ -822,6 +815,7 @@
     int r_base = TargetReg(kSp).GetReg();
     int displacement = SRegOffset(rl_dest.s_reg_low);
 
+    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
     LIR * store = NewLIR3(kX86Mov32MI, r_base, displacement + LOWORD_OFFSET, val_lo);
     AnnotateDalvikRegAccess(store, (displacement + LOWORD_OFFSET) >> 2,
                               false /* is_load */, true /* is64bit */);
@@ -1109,7 +1103,10 @@
       } else {
         // Load the start index from stack, remembering that we pushed EDI.
         int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
-        Load32Disp(rs_rX86_SP, displacement, rs_rBX);
+        {
+          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+          Load32Disp(rs_rX86_SP, displacement, rs_rBX);
+        }
         OpRegReg(kOpXor, rs_rDI, rs_rDI);
         OpRegReg(kOpCmp, rs_rBX, rs_rDI);
         OpCondRegReg(kOpCmov, kCondLt, rs_rBX, rs_rDI);
@@ -1413,10 +1410,10 @@
   // We don't know the proper offset for the value, so pick one that will force
   // 4 byte offset.  We will fix this up in the assembler later to have the right
   // value.
+  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
   LIR *load = NewLIR3(kX86Mova128RM, reg, rl_method.reg.GetReg(),  256 /* bogus */);
   load->flags.fixup = kFixupLoad;
   load->target = data_target;
-  SetMemRefType(load, true, kLiteral);
 }
 
 void X86Mir2Lir::GenMoveVector(BasicBlock *bb, MIR *mir) {
@@ -1856,6 +1853,7 @@
    * end up half-promoted.  In those cases, we must flush the promoted
    * half to memory as well.
    */
+  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
   for (int i = 0; i < cu_->num_ins; i++) {
     PromotionMap* v_map = &promotion_map_[start_vreg + i];
     RegStorage reg = RegStorage::InvalidReg();
@@ -1986,12 +1984,14 @@
       if (loc.wide) {
         loc = UpdateLocWide(loc);
         if (loc.location == kLocPhysReg) {
+          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
           StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64);
         }
         next_arg += 2;
       } else {
         loc = UpdateLoc(loc);
         if (loc.location == kLocPhysReg) {
+          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
           StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32);
         }
         next_arg++;
@@ -2008,6 +2008,8 @@
     int current_src_offset = start_offset;
     int current_dest_offset = outs_offset;
 
+    // Only dalvik regs are accessed in this loop; no next_call_insn() calls.
+    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
     while (regs_left_to_pass_via_stack > 0) {
       // This is based on the knowledge that the stack itself is 16-byte aligned.
       bool src_is_16b_aligned = (current_src_offset & 0xF) == 0;
@@ -2045,6 +2047,7 @@
         bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
         bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;
 
+        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
         if (src_is_16b_aligned) {
           ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovA128FP);
         } else if (src_is_8b_aligned) {
@@ -2074,8 +2077,7 @@
             AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true, true);
           } else {
             // Set barrier for 128-bit load.
-            SetMemRefType(ld1, true /* is_load */, kDalvikReg);
-            ld1->u.m.def_mask = ENCODE_ALL;
+            ld1->u.m.def_mask = &kEncodeAll;
           }
         }
         if (st1 != nullptr) {
@@ -2085,8 +2087,7 @@
             AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false, true);
           } else {
             // Set barrier for 128-bit store.
-            SetMemRefType(st1, false /* is_load */, kDalvikReg);
-            st1->u.m.def_mask = ENCODE_ALL;
+            st1->u.m.def_mask = &kEncodeAll;
           }
         }
 
@@ -2123,20 +2124,23 @@
       if (!reg.Valid()) {
         int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);
 
-        if (rl_arg.wide) {
-          if (rl_arg.location == kLocPhysReg) {
-            StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k64);
+        {
+          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+          if (rl_arg.wide) {
+            if (rl_arg.location == kLocPhysReg) {
+              StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k64);
+            } else {
+              LoadValueDirectWideFixed(rl_arg, regWide);
+              StoreBaseDisp(TargetReg(kSp), out_offset, regWide, k64);
+            }
+            i++;
           } else {
-            LoadValueDirectWideFixed(rl_arg, regWide);
-            StoreBaseDisp(TargetReg(kSp), out_offset, regWide, k64);
-          }
-          i++;
-        } else {
-          if (rl_arg.location == kLocPhysReg) {
-            StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k32);
-          } else {
-            LoadValueDirectFixed(rl_arg, regSingle);
-            StoreBaseDisp(TargetReg(kSp), out_offset, regSingle, k32);
+            if (rl_arg.location == kLocPhysReg) {
+              StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k32);
+            } else {
+              LoadValueDirectFixed(rl_arg, regSingle);
+              StoreBaseDisp(TargetReg(kSp), out_offset, regSingle, k32);
+            }
           }
         }
         call_state = next_call_insn(cu_, info, call_state, target_method,
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index d074d81..c72e8cd 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -376,7 +376,8 @@
       break;
   }
   LIR *l = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), offset);
-  if (r_base == rs_rX86_SP) {
+  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
+    DCHECK(r_base == rs_rX86_SP);
     AnnotateDalvikRegAccess(l, offset >> 2, true /* is_load */, false /* is_64bit */);
   }
   return l;
@@ -403,8 +404,10 @@
       break;
   }
   LIR *l = NewLIR3(opcode, rs_rX86_SP.GetReg(), displacement, r_value);
-  AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is64Bit /* is_64bit */);
-  AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, is64Bit /* is_64bit */);
+  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
+    AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is64Bit /* is_64bit */);
+    AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, is64Bit /* is_64bit */);
+  }
   return l;
 }
 
@@ -427,7 +430,9 @@
       break;
   }
   LIR *l = NewLIR3(opcode, r_dest.GetReg(), rs_rX86_SP.GetReg(), displacement);
-  AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is64Bit /* is_64bit */);
+  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
+    AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is64Bit /* is_64bit */);
+  }
   return l;
 }
 
@@ -575,11 +580,11 @@
         // We don't know the proper offset for the value, so pick one that will force
         // 4 byte offset.  We will fix this up in the assembler later to have the right
         // value.
+        ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
         res = LoadBaseDisp(rl_method.reg, 256 /* bogus */, RegStorage::FloatSolo64(low_reg_val),
                            kDouble);
         res->target = data_target;
         res->flags.fixup = kFixupLoad;
-        SetMemRefType(res, true, kLiteral);
         store_method_addr_used_ = true;
       } else {
         if (val_lo == 0) {
@@ -684,7 +689,8 @@
                         displacement + HIWORD_OFFSET);
       }
     }
-    if (r_base == rs_rX86_SP) {
+    if (mem_ref_type_ == ResourceMask::kDalvikReg) {
+      DCHECK(r_base == rs_rX86_SP);
       AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                               true /* is_load */, is64bit);
       if (pair) {
@@ -815,7 +821,8 @@
       store = NewLIR3(opcode, r_base.GetReg(), displacement + LOWORD_OFFSET, r_src.GetLowReg());
       store2 = NewLIR3(opcode, r_base.GetReg(), displacement + HIWORD_OFFSET, r_src.GetHighReg());
     }
-    if (r_base == rs_rX86_SP) {
+    if (mem_ref_type_ == ResourceMask::kDalvikReg) {
+      DCHECK(r_base == rs_rX86_SP);
       AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                               false /* is_load */, is64bit);
       if (pair) {
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index 5022529..f1b5811 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -111,9 +111,6 @@
   kX86RegEnd   = kX86FPStack,
 };
 
-#define ENCODE_X86_REG_SP           (1ULL << kX86RegSP)
-#define ENCODE_X86_FP_STACK         (1ULL << kX86FPStack)
-
 // FIXME: for 64-bit, perhaps add an X86_64NativeRegisterPool enum?
 enum X86NativeRegisterPool {
   r0             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 0,