Remove -Wno-unused-parameter and -Wno-sign-promo from base cflags.

Fix associated errors about unused parameters and implicit sign conversions.
For sign conversion this was largely in the area of enums, so add ostream
operators for the affected enums and fix tools/generate-operator-out.py.
Tidy arena allocation code and arena allocated data types, rather than fixing
new and delete operators.
Remove dead code.

Change-Id: I5b433e722d2f75baacfacae4d32aef4a828bfe1b
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index dce2b73..85a3b32 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -547,6 +547,11 @@
   { kX86RepneScasw,    kNullary, NO_OPERAND | REG_USEA | REG_USEC | SETS_CCODES, { 0x66, 0xF2, 0xAF, 0, 0, 0, 0, 0, false }, "RepNE ScasW", "" },
 };
 
+std::ostream& operator<<(std::ostream& os, const X86OpCode& rhs) {
+  os << X86Mir2Lir::EncodingMap[rhs].name;
+  return os;
+}
+
 static bool NeedsRex(int32_t raw_reg) {
   return RegStorage::RegNum(raw_reg) > 7;
 }
@@ -1631,6 +1636,7 @@
  * sequence or request that the trace be shortened and retried.
  */
 AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
+  UNUSED(start_addr);
   LIR *lir;
   AssemblerStatus res = kSuccess;  // Assume success
 
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 86efc1e..497ef94 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -290,9 +290,10 @@
  */
 static int X86NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
                              int state, const MethodReference& target_method,
-                             uint32_t unused,
+                             uint32_t,
                              uintptr_t direct_code, uintptr_t direct_method,
                              InvokeType type) {
+  UNUSED(info, direct_code);
   Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
   if (direct_method != 0) {
     switch (state) {
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 7b5b831..dec99ae 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -250,7 +250,7 @@
   void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
   void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
                         int32_t true_val, int32_t false_val, RegStorage rs_dest,
-                        int dest_reg_class) OVERRIDE;
+                        RegisterClass dest_reg_class) OVERRIDE;
   bool GenMemBarrier(MemBarrierKind barrier_kind) OVERRIDE;
   void GenMoveException(RegLocation rl_dest) OVERRIDE;
   void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
@@ -499,7 +499,7 @@
   void GenConstWide(RegLocation rl_dest, int64_t value);
   void GenMultiplyVectorSignedByte(RegStorage rs_dest_src1, RegStorage rs_src2);
   void GenMultiplyVectorLong(RegStorage rs_dest_src1, RegStorage rs_src2);
-  void GenShiftByteVector(BasicBlock *bb, MIR *mir);
+  void GenShiftByteVector(MIR* mir);
   void AndMaskVectorRegister(RegStorage rs_src1, uint32_t m1, uint32_t m2, uint32_t m3,
                              uint32_t m4);
   void MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32_t m1, uint32_t m2,
@@ -557,88 +557,80 @@
 
   /*
    * @brief Load 128 bit constant into vector register.
-   * @param bb The basic block in which the MIR is from.
    * @param mir The MIR whose opcode is kMirConstVector
    * @note vA is the TypeSize for the register.
    * @note vB is the destination XMM register. arg[0..3] are 32 bit constant values.
    */
-  void GenConst128(BasicBlock* bb, MIR* mir);
+  void GenConst128(MIR* mir);
 
   /*
    * @brief MIR to move a vectorized register to another.
-   * @param bb The basic block in which the MIR is from.
    * @param mir The MIR whose opcode is kMirConstVector.
    * @note vA: TypeSize
    * @note vB: destination
    * @note vC: source
    */
-  void GenMoveVector(BasicBlock *bb, MIR *mir);
+  void GenMoveVector(MIR* mir);
 
   /*
    * @brief Packed multiply of units in two vector registers: vB = vB .* @note vC using vA to know
    * the type of the vector.
-   * @param bb The basic block in which the MIR is from.
    * @param mir The MIR whose opcode is kMirConstVector.
    * @note vA: TypeSize
    * @note vB: destination and source
    * @note vC: source
    */
-  void GenMultiplyVector(BasicBlock *bb, MIR *mir);
+  void GenMultiplyVector(MIR* mir);
 
   /*
    * @brief Packed addition of units in two vector registers: vB = vB .+ vC using vA to know the
    * type of the vector.
-   * @param bb The basic block in which the MIR is from.
    * @param mir The MIR whose opcode is kMirConstVector.
    * @note vA: TypeSize
    * @note vB: destination and source
    * @note vC: source
    */
-  void GenAddVector(BasicBlock *bb, MIR *mir);
+  void GenAddVector(MIR* mir);
 
   /*
    * @brief Packed subtraction of units in two vector registers: vB = vB .- vC using vA to know the
    * type of the vector.
-   * @param bb The basic block in which the MIR is from.
    * @param mir The MIR whose opcode is kMirConstVector.
    * @note vA: TypeSize
    * @note vB: destination and source
    * @note vC: source
    */
-  void GenSubtractVector(BasicBlock *bb, MIR *mir);
+  void GenSubtractVector(MIR* mir);
 
   /*
    * @brief Packed shift left of units in two vector registers: vB = vB .<< vC using vA to know the
    * type of the vector.
-   * @param bb The basic block in which the MIR is from.
    * @param mir The MIR whose opcode is kMirConstVector.
    * @note vA: TypeSize
    * @note vB: destination and source
    * @note vC: immediate
    */
-  void GenShiftLeftVector(BasicBlock *bb, MIR *mir);
+  void GenShiftLeftVector(MIR* mir);
 
   /*
    * @brief Packed signed shift right of units in two vector registers: vB = vB .>> vC using vA to
    * know the type of the vector.
-   * @param bb The basic block in which the MIR is from.
    * @param mir The MIR whose opcode is kMirConstVector.
    * @note vA: TypeSize
    * @note vB: destination and source
    * @note vC: immediate
    */
-  void GenSignedShiftRightVector(BasicBlock *bb, MIR *mir);
+  void GenSignedShiftRightVector(MIR* mir);
 
   /*
    * @brief Packed unsigned shift right of units in two vector registers: vB = vB .>>> vC using vA
    * to know the type of the vector.
-   * @param bb The basic block in which the MIR is from..
    * @param mir The MIR whose opcode is kMirConstVector.
    * @note vA: TypeSize
    * @note vB: destination and source
    * @note vC: immediate
    */
-  void GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir);
+  void GenUnsignedShiftRightVector(MIR* mir);
 
   /*
    * @brief Packed bitwise and of units in two vector registers: vB = vB .& vC using vA to know the
@@ -647,51 +639,47 @@
    * @note vB: destination and source
    * @note vC: source
    */
-  void GenAndVector(BasicBlock *bb, MIR *mir);
+  void GenAndVector(MIR* mir);
 
   /*
    * @brief Packed bitwise or of units in two vector registers: vB = vB .| vC using vA to know the
    * type of the vector.
-   * @param bb The basic block in which the MIR is from.
    * @param mir The MIR whose opcode is kMirConstVector.
    * @note vA: TypeSize
    * @note vB: destination and source
    * @note vC: source
    */
-  void GenOrVector(BasicBlock *bb, MIR *mir);
+  void GenOrVector(MIR* mir);
 
   /*
    * @brief Packed bitwise xor of units in two vector registers: vB = vB .^ vC using vA to know the
    * type of the vector.
-   * @param bb The basic block in which the MIR is from.
    * @param mir The MIR whose opcode is kMirConstVector.
    * @note vA: TypeSize
    * @note vB: destination and source
    * @note vC: source
    */
-  void GenXorVector(BasicBlock *bb, MIR *mir);
+  void GenXorVector(MIR* mir);
 
   /*
    * @brief Reduce a 128-bit packed element into a single VR by taking lower bits
-   * @param bb The basic block in which the MIR is from.
    * @param mir The MIR whose opcode is kMirConstVector.
    * @details Instruction does a horizontal addition of the packed elements and then adds it to VR.
    * @note vA: TypeSize
    * @note vB: destination and source VR (not vector register)
    * @note vC: source (vector register)
    */
-  void GenAddReduceVector(BasicBlock *bb, MIR *mir);
+  void GenAddReduceVector(MIR* mir);
 
   /*
    * @brief Extract a packed element into a single VR.
-   * @param bb The basic block in which the MIR is from.
    * @param mir The MIR whose opcode is kMirConstVector.
    * @note vA: TypeSize
    * @note vB: destination VR (not vector register)
    * @note vC: source (vector register)
    * @note arg[0]: The index to use for extraction from vector register (which packed element).
    */
-  void GenReduceVector(BasicBlock *bb, MIR *mir);
+  void GenReduceVector(MIR* mir);
 
   /*
    * @brief Create a vector value, with all TypeSize values equal to vC
@@ -701,21 +689,21 @@
    * @note vB: destination vector register.
    * @note vC: source VR (not vector register).
    */
-  void GenSetVector(BasicBlock *bb, MIR *mir);
+  void GenSetVector(MIR* mir);
 
   /**
    * @brief Used to generate code for kMirOpPackedArrayGet.
    * @param bb The basic block of MIR.
    * @param mir The mir whose opcode is kMirOpPackedArrayGet.
    */
-  void GenPackedArrayGet(BasicBlock *bb, MIR *mir);
+  void GenPackedArrayGet(BasicBlock* bb, MIR* mir);
 
   /**
    * @brief Used to generate code for kMirOpPackedArrayPut.
    * @param bb The basic block of MIR.
    * @param mir The mir whose opcode is kMirOpPackedArrayPut.
    */
-  void GenPackedArrayPut(BasicBlock *bb, MIR *mir);
+  void GenPackedArrayPut(BasicBlock* bb, MIR* mir);
 
   /*
    * @brief Generate code for a vector opcode.
@@ -890,8 +878,8 @@
    * the value is live in a temp register of the correct class.  Additionally, if the value is in
    * a temp register of the wrong register class, it will be clobbered.
    */
-  RegLocation UpdateLocTyped(RegLocation loc, int reg_class);
-  RegLocation UpdateLocWideTyped(RegLocation loc, int reg_class);
+  RegLocation UpdateLocTyped(RegLocation loc);
+  RegLocation UpdateLocWideTyped(RegLocation loc);
 
   /*
    * @brief Analyze MIR before generating code, to prepare for the code generation.
@@ -902,7 +890,7 @@
    * @brief Analyze one basic block.
    * @param bb Basic block to analyze.
    */
-  void AnalyzeBB(BasicBlock * bb);
+  void AnalyzeBB(BasicBlock* bb);
 
   /*
    * @brief Analyze one extended MIR instruction
@@ -910,7 +898,7 @@
    * @param bb Basic block containing instruction.
    * @param mir Extended instruction to analyze.
    */
-  void AnalyzeExtendedMIR(int opcode, BasicBlock * bb, MIR *mir);
+  void AnalyzeExtendedMIR(int opcode, BasicBlock* bb, MIR* mir);
 
   /*
    * @brief Analyze one MIR instruction
@@ -918,7 +906,7 @@
    * @param bb Basic block containing instruction.
    * @param mir Instruction to analyze.
    */
-  virtual void AnalyzeMIR(int opcode, BasicBlock * bb, MIR *mir);
+  virtual void AnalyzeMIR(int opcode, BasicBlock* bb, MIR* mir);
 
   /*
    * @brief Analyze one MIR float/double instruction
@@ -926,7 +914,7 @@
    * @param bb Basic block containing instruction.
    * @param mir Instruction to analyze.
    */
-  virtual void AnalyzeFPInstruction(int opcode, BasicBlock * bb, MIR *mir);
+  virtual void AnalyzeFPInstruction(int opcode, BasicBlock* bb, MIR* mir);
 
   /*
    * @brief Analyze one use of a double operand.
@@ -940,7 +928,7 @@
    * @param bb Basic block containing instruction.
    * @param mir Instruction to analyze.
    */
-  void AnalyzeInvokeStatic(int opcode, BasicBlock * bb, MIR *mir);
+  void AnalyzeInvokeStatic(int opcode, BasicBlock* bb, MIR* mir);
 
   // Information derived from analysis of MIR
 
@@ -987,12 +975,11 @@
    */
   LIR* AddVectorLiteral(int32_t* constants);
 
-  InToRegStorageMapping in_to_reg_storage_mapping_;
-
-  bool WideGPRsAreAliases() OVERRIDE {
+  bool WideGPRsAreAliases() const OVERRIDE {
     return cu_->target64;  // On 64b, we have 64b GPRs.
   }
-  bool WideFPRsAreAliases() OVERRIDE {
+
+  bool WideFPRsAreAliases() const OVERRIDE {
     return true;  // xmm registers have 64b views even on x86.
   }
 
@@ -1002,11 +989,17 @@
    */
   static void DumpRegLocation(RegLocation loc);
 
-  static const X86EncodingMap EncodingMap[kX86Last];
+  InToRegStorageMapping in_to_reg_storage_mapping_;
 
  private:
   void SwapBits(RegStorage result_reg, int shift, int32_t value);
   void SwapBits64(RegStorage result_reg, int shift, int64_t value);
+
+  static const X86EncodingMap EncodingMap[kX86Last];
+
+  friend std::ostream& operator<<(std::ostream& os, const X86OpCode& rhs);
+
+  DISALLOW_COPY_AND_ASSIGN(X86Mir2Lir);
 };
 
 }  // namespace art
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 21d1a5c..254d90f 100755
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -169,8 +169,7 @@
    * If the result's location is in memory, then we do not need to do anything
    * more since the fstp has already placed the correct value in memory.
    */
-  RegLocation rl_result = is_double ? UpdateLocWideTyped(rl_dest, kFPReg) :
-      UpdateLocTyped(rl_dest, kFPReg);
+  RegLocation rl_result = is_double ? UpdateLocWideTyped(rl_dest) : UpdateLocTyped(rl_dest);
   if (rl_result.location == kLocPhysReg) {
     /*
      * We already know that the result is in a physical register but do not know if it is the
@@ -431,8 +430,7 @@
    * If the result's location is in memory, then we do not need to do anything
    * more since the fstp has already placed the correct value in memory.
    */
-  RegLocation rl_result = is_double ? UpdateLocWideTyped(rl_dest, kFPReg) :
-      UpdateLocTyped(rl_dest, kFPReg);
+  RegLocation rl_result = is_double ? UpdateLocWideTyped(rl_dest) : UpdateLocTyped(rl_dest);
   if (rl_result.location == kLocPhysReg) {
     rl_result = EvalLoc(rl_dest, kFPReg, true);
     if (is_double) {
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index aa1bf7f..7229318 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -208,7 +208,7 @@
 
 void X86Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
                                   int32_t true_val, int32_t false_val, RegStorage rs_dest,
-                                  int dest_reg_class) {
+                                  RegisterClass dest_reg_class) {
   DCHECK(!left_op.IsPair() && !right_op.IsPair() && !rs_dest.IsPair());
   DCHECK(!left_op.IsFloat() && !right_op.IsFloat() && !rs_dest.IsFloat());
 
@@ -268,6 +268,7 @@
 }
 
 void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
+  UNUSED(bb);
   RegLocation rl_result;
   RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
   RegLocation rl_dest = mir_graph_->GetDest(mir);
@@ -594,8 +595,9 @@
 }
 
 RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div) {
+  UNUSED(rl_dest, reg_lo, lit, is_div);
   LOG(FATAL) << "Unexpected use of GenDivRemLit for x86";
-  return rl_dest;
+  UNREACHABLE();
 }
 
 RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src,
@@ -763,12 +765,14 @@
 
 RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi,
                                   bool is_div) {
+  UNUSED(rl_dest, reg_lo, reg_hi, is_div);
   LOG(FATAL) << "Unexpected use of GenDivRem for x86";
-  return rl_dest;
+  UNREACHABLE();
 }
 
 RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
                                   RegLocation rl_src2, bool is_div, int flags) {
+  UNUSED(rl_dest);
   // We have to use fixed registers, so flush all the temps.
 
   // Prepare for explicit register usage.
@@ -1022,7 +1026,7 @@
     DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
     // In 32-bit mode the only EAX..EDX registers can be used with Mov8MR.
     if (!cu_->target64 && size == kSignedByte) {
-      rl_src_value = UpdateLocTyped(rl_src_value, kCoreReg);
+      rl_src_value = UpdateLocTyped(rl_src_value);
       if (rl_src_value.location == kLocPhysReg && !IsByteRegister(rl_src_value.reg)) {
         RegStorage temp = AllocateByteRegister();
         OpRegCopy(temp, rl_src_value.reg);
@@ -1309,18 +1313,21 @@
 }
 
 LIR* X86Mir2Lir::OpVldm(RegStorage r_base, int count) {
+  UNUSED(r_base, count);
   LOG(FATAL) << "Unexpected use of OpVldm for x86";
-  return NULL;
+  UNREACHABLE();
 }
 
 LIR* X86Mir2Lir::OpVstm(RegStorage r_base, int count) {
+  UNUSED(r_base, count);
   LOG(FATAL) << "Unexpected use of OpVstm for x86";
-  return NULL;
+  UNREACHABLE();
 }
 
 void X86Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                                RegLocation rl_result, int lit,
                                                int first_bit, int second_bit) {
+  UNUSED(lit);
   RegStorage t_reg = AllocTemp();
   OpRegRegImm(kOpLsl, t_reg, rl_src.reg, second_bit - first_bit);
   OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, t_reg);
@@ -1453,22 +1460,27 @@
 
 bool X86Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                     RegLocation rl_src, RegLocation rl_dest, int lit) {
+  UNUSED(dalvik_opcode, is_div, rl_src, rl_dest, lit);
   LOG(FATAL) << "Unexpected use of smallLiteralDive in x86";
-  return false;
+  UNREACHABLE();
 }
 
 bool X86Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
+  UNUSED(rl_src, rl_dest, lit);
   LOG(FATAL) << "Unexpected use of easyMultiply in x86";
-  return false;
+  UNREACHABLE();
 }
 
 LIR* X86Mir2Lir::OpIT(ConditionCode cond, const char* guide) {
+  UNUSED(cond, guide);
   LOG(FATAL) << "Unexpected use of OpIT in x86";
-  return NULL;
+  UNREACHABLE();
 }
 
 void X86Mir2Lir::OpEndIT(LIR* it) {
+  UNUSED(it);
   LOG(FATAL) << "Unexpected use of OpEndIT in x86";
+  UNREACHABLE();
 }
 
 void X86Mir2Lir::GenImulRegImm(RegStorage dest, RegStorage src, int val) {
@@ -1486,6 +1498,7 @@
 }
 
 void X86Mir2Lir::GenImulMemImm(RegStorage dest, int sreg, int displacement, int val) {
+  UNUSED(sreg);
   // All memory accesses below reference dalvik regs.
   ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
 
@@ -1616,7 +1629,7 @@
     int32_t val_hi = High32Bits(val);
     // Prepare for explicit register usage.
     ExplicitTempRegisterLock(this, 3, &rs_r0, &rs_r1, &rs_r2);
-    rl_src1 = UpdateLocWideTyped(rl_src1, kCoreReg);
+    rl_src1 = UpdateLocWideTyped(rl_src1);
     bool src1_in_reg = rl_src1.location == kLocPhysReg;
     int displacement = SRegOffset(rl_src1.s_reg_low);
 
@@ -1700,8 +1713,8 @@
 
   // Prepare for explicit register usage.
   ExplicitTempRegisterLock(this, 3, &rs_r0, &rs_r1, &rs_r2);
-  rl_src1 = UpdateLocWideTyped(rl_src1, kCoreReg);
-  rl_src2 = UpdateLocWideTyped(rl_src2, kCoreReg);
+  rl_src1 = UpdateLocWideTyped(rl_src1);
+  rl_src2 = UpdateLocWideTyped(rl_src2);
 
   // At this point, the VRs are in their home locations.
   bool src1_in_reg = rl_src1.location == kLocPhysReg;
@@ -1837,12 +1850,12 @@
 }
 
 void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op) {
-  rl_dest = UpdateLocWideTyped(rl_dest, kCoreReg);
+  rl_dest = UpdateLocWideTyped(rl_dest);
   if (rl_dest.location == kLocPhysReg) {
     // Ensure we are in a register pair
     RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
 
-    rl_src = UpdateLocWideTyped(rl_src, kCoreReg);
+    rl_src = UpdateLocWideTyped(rl_src);
     GenLongRegOrMemOp(rl_result, rl_src, op);
     StoreFinalValueWide(rl_dest, rl_result);
     return;
@@ -1850,7 +1863,7 @@
     // Handle the case when src and dest are intersect.
     rl_src = LoadValueWide(rl_src, kCoreReg);
     RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-    rl_src = UpdateLocWideTyped(rl_src, kCoreReg);
+    rl_src = UpdateLocWideTyped(rl_src);
     GenLongRegOrMemOp(rl_result, rl_src, op);
     StoreFinalValueWide(rl_dest, rl_result);
     return;
@@ -1910,7 +1923,7 @@
     rl_result = ForceTempWide(rl_result);
 
     // Perform the operation using the RHS.
-    rl_src2 = UpdateLocWideTyped(rl_src2, kCoreReg);
+    rl_src2 = UpdateLocWideTyped(rl_src2);
     GenLongRegOrMemOp(rl_result, rl_src2, op);
 
     // And now record that the result is in the temp.
@@ -1919,10 +1932,9 @@
   }
 
   // It wasn't in registers, so it better be in memory.
-  DCHECK((rl_dest.location == kLocDalvikFrame) ||
-         (rl_dest.location == kLocCompilerTemp));
-  rl_src1 = UpdateLocWideTyped(rl_src1, kCoreReg);
-  rl_src2 = UpdateLocWideTyped(rl_src2, kCoreReg);
+  DCHECK((rl_dest.location == kLocDalvikFrame) || (rl_dest.location == kLocCompilerTemp));
+  rl_src1 = UpdateLocWideTyped(rl_src1);
+  rl_src2 = UpdateLocWideTyped(rl_src2);
 
   // Get one of the source operands into temporary register.
   rl_src1 = LoadValueWide(rl_src1, kCoreReg);
@@ -2088,7 +2100,7 @@
       NewLIR1(kX86Imul64DaR, numerator_reg.GetReg());
     } else {
       // Only need this once.  Multiply directly from the value.
-      rl_src = UpdateLocWideTyped(rl_src, kCoreReg);
+      rl_src = UpdateLocWideTyped(rl_src);
       if (rl_src.location != kLocPhysReg) {
         // Okay, we can do this from memory.
         ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -2395,6 +2407,7 @@
 
 RegLocation X86Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                           RegLocation rl_src, int shift_amount, int flags) {
+  UNUSED(flags);
   RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
   if (cu_->target64) {
     OpKind op = static_cast<OpKind>(0);    /* Make gcc happy */
@@ -2692,7 +2705,7 @@
       return in_mem ? kX86Xor32MI : kX86Xor32RI;
     default:
       LOG(FATAL) << "Unexpected opcode: " << op;
-      return kX86Add32MI;
+      UNREACHABLE();
   }
 }
 
@@ -2706,7 +2719,7 @@
       return false;
     }
 
-    rl_dest = UpdateLocWideTyped(rl_dest, kCoreReg);
+    rl_dest = UpdateLocWideTyped(rl_dest);
 
     if ((rl_dest.location == kLocDalvikFrame) ||
         (rl_dest.location == kLocCompilerTemp)) {
@@ -2736,7 +2749,7 @@
 
   int32_t val_lo = Low32Bits(val);
   int32_t val_hi = High32Bits(val);
-  rl_dest = UpdateLocWideTyped(rl_dest, kCoreReg);
+  rl_dest = UpdateLocWideTyped(rl_dest);
 
   // Can we just do this into memory?
   if ((rl_dest.location == kLocDalvikFrame) ||
@@ -2812,8 +2825,8 @@
 
   int32_t val_lo = Low32Bits(val);
   int32_t val_hi = High32Bits(val);
-  rl_dest = UpdateLocWideTyped(rl_dest, kCoreReg);
-  rl_src1 = UpdateLocWideTyped(rl_src1, kCoreReg);
+  rl_dest = UpdateLocWideTyped(rl_dest);
+  rl_src1 = UpdateLocWideTyped(rl_src1);
 
   // Can we do this directly into the destination registers?
   if (rl_dest.location == kLocPhysReg && rl_src1.location == kLocPhysReg &&
@@ -3035,7 +3048,7 @@
 
   if (unary) {
     rl_lhs = LoadValue(rl_lhs, kCoreReg);
-    rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+    rl_result = UpdateLocTyped(rl_dest);
     rl_result = EvalLoc(rl_dest, kCoreReg, true);
     OpRegReg(op, rl_result.reg, rl_lhs.reg);
   } else {
@@ -3045,7 +3058,7 @@
       LoadValueDirectFixed(rl_rhs, t_reg);
       if (is_two_addr) {
         // Can we do this directly into memory?
-        rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+        rl_result = UpdateLocTyped(rl_dest);
         if (rl_result.location != kLocPhysReg) {
           // Okay, we can do this into memory
           OpMemReg(op, rl_result, t_reg.GetReg());
@@ -3068,12 +3081,12 @@
       // Multiply is 3 operand only (sort of).
       if (is_two_addr && op != kOpMul) {
         // Can we do this directly into memory?
-        rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+        rl_result = UpdateLocTyped(rl_dest);
         if (rl_result.location == kLocPhysReg) {
           // Ensure res is in a core reg
           rl_result = EvalLoc(rl_dest, kCoreReg, true);
           // Can we do this from memory directly?
-          rl_rhs = UpdateLocTyped(rl_rhs, kCoreReg);
+          rl_rhs = UpdateLocTyped(rl_rhs);
           if (rl_rhs.location != kLocPhysReg) {
             OpRegMem(op, rl_result.reg, rl_rhs);
             StoreFinalValue(rl_dest, rl_result);
@@ -3088,7 +3101,7 @@
         // It might happen rl_rhs and rl_dest are the same VR
         // in this case rl_dest is in reg after LoadValue while
         // rl_result is not updated yet, so do this
-        rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+        rl_result = UpdateLocTyped(rl_dest);
         if (rl_result.location != kLocPhysReg) {
           // Okay, we can do this into memory.
           OpMemReg(op, rl_result, rl_rhs.reg.GetReg());
@@ -3105,8 +3118,8 @@
         }
       } else {
         // Try to use reg/memory instructions.
-        rl_lhs = UpdateLocTyped(rl_lhs, kCoreReg);
-        rl_rhs = UpdateLocTyped(rl_rhs, kCoreReg);
+        rl_lhs = UpdateLocTyped(rl_lhs);
+        rl_rhs = UpdateLocTyped(rl_rhs);
         // We can't optimize with FP registers.
         if (!IsOperationSafeWithoutTemps(rl_lhs, rl_rhs)) {
           // Something is difficult, so fall back to the standard case.
@@ -3178,7 +3191,7 @@
     Mir2Lir::GenIntToLong(rl_dest, rl_src);
     return;
   }
-  rl_src = UpdateLocTyped(rl_src, kCoreReg);
+  rl_src = UpdateLocTyped(rl_src);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   if (rl_src.location == kLocPhysReg) {
     NewLIR2(kX86MovsxdRR, rl_result.reg.GetReg(), rl_src.reg.GetReg());
@@ -3278,7 +3291,7 @@
   LoadValueDirectFixed(rl_shift, t_reg);
   if (is_two_addr) {
     // Can we do this directly into memory?
-    rl_result = UpdateLocWideTyped(rl_dest, kCoreReg);
+    rl_result = UpdateLocWideTyped(rl_dest);
     if (rl_result.location != kLocPhysReg) {
       // Okay, we can do this into memory
       ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 79d5eeb..9616d8f 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -143,25 +143,6 @@
 
 RegStorage rs_rX86_SP;
 
-X86NativeRegisterPool rX86_ARG0;
-X86NativeRegisterPool rX86_ARG1;
-X86NativeRegisterPool rX86_ARG2;
-X86NativeRegisterPool rX86_ARG3;
-X86NativeRegisterPool rX86_ARG4;
-X86NativeRegisterPool rX86_ARG5;
-X86NativeRegisterPool rX86_FARG0;
-X86NativeRegisterPool rX86_FARG1;
-X86NativeRegisterPool rX86_FARG2;
-X86NativeRegisterPool rX86_FARG3;
-X86NativeRegisterPool rX86_FARG4;
-X86NativeRegisterPool rX86_FARG5;
-X86NativeRegisterPool rX86_FARG6;
-X86NativeRegisterPool rX86_FARG7;
-X86NativeRegisterPool rX86_RET0;
-X86NativeRegisterPool rX86_RET1;
-X86NativeRegisterPool rX86_INVOKE_TGT;
-X86NativeRegisterPool rX86_COUNT;
-
 RegStorage rs_rX86_ARG0;
 RegStorage rs_rX86_ARG1;
 RegStorage rs_rX86_ARG2;
@@ -237,8 +218,9 @@
 }
 
 RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
+  UNUSED(reg);
   LOG(FATAL) << "Do not use this function!!!";
-  return RegStorage::InvalidReg();
+  UNREACHABLE();
 }
 
 /*
@@ -795,14 +777,11 @@
   class_type_address_insns_.reserve(100);
   call_method_insns_.reserve(100);
   store_method_addr_used_ = false;
-  if (kIsDebugBuild) {
     for (int i = 0; i < kX86Last; i++) {
-      if (X86Mir2Lir::EncodingMap[i].opcode != i) {
-        LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
-                   << " is wrong: expecting " << i << ", seeing "
-                   << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
-      }
-    }
+      DCHECK_EQ(X86Mir2Lir::EncodingMap[i].opcode, i)
+          << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
+          << " is wrong: expecting " << i << ", seeing "
+          << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
   }
   if (cu_->target64) {
     rs_rX86_SP = rs_rX86_SP_64;
@@ -821,20 +800,6 @@
     rs_rX86_FARG5 = rs_fr5;
     rs_rX86_FARG6 = rs_fr6;
     rs_rX86_FARG7 = rs_fr7;
-    rX86_ARG0 = rDI;
-    rX86_ARG1 = rSI;
-    rX86_ARG2 = rDX;
-    rX86_ARG3 = rCX;
-    rX86_ARG4 = r8;
-    rX86_ARG5 = r9;
-    rX86_FARG0 = fr0;
-    rX86_FARG1 = fr1;
-    rX86_FARG2 = fr2;
-    rX86_FARG3 = fr3;
-    rX86_FARG4 = fr4;
-    rX86_FARG5 = fr5;
-    rX86_FARG6 = fr6;
-    rX86_FARG7 = fr7;
     rs_rX86_INVOKE_TGT = rs_rDI;
   } else {
     rs_rX86_SP = rs_rX86_SP_32;
@@ -853,14 +818,6 @@
     rs_rX86_FARG5 = RegStorage::InvalidReg();
     rs_rX86_FARG6 = RegStorage::InvalidReg();
     rs_rX86_FARG7 = RegStorage::InvalidReg();
-    rX86_ARG0 = rAX;
-    rX86_ARG1 = rCX;
-    rX86_ARG2 = rDX;
-    rX86_ARG3 = rBX;
-    rX86_FARG0 = rAX;
-    rX86_FARG1 = rCX;
-    rX86_FARG2 = rDX;
-    rX86_FARG3 = rBX;
     rs_rX86_INVOKE_TGT = rs_rAX;
     // TODO(64): Initialize with invalid reg
 //    rX86_ARG4 = RegStorage::InvalidReg();
@@ -869,10 +826,6 @@
   rs_rX86_RET0 = rs_rAX;
   rs_rX86_RET1 = rs_rDX;
   rs_rX86_COUNT = rs_rCX;
-  rX86_RET0 = rAX;
-  rX86_RET1 = rDX;
-  rX86_INVOKE_TGT = rAX;
-  rX86_COUNT = rCX;
 }
 
 Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
@@ -882,8 +835,9 @@
 
 // Not used in x86(-64)
 RegStorage X86Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
+  UNUSED(trampoline);
   LOG(FATAL) << "Unexpected use of LoadHelper in x86";
-  return RegStorage::InvalidReg();
+  UNREACHABLE();
 }
 
 LIR* X86Mir2Lir::CheckSuspendUsingLoad() {
@@ -1548,46 +1502,46 @@
       ReturnVectorRegisters(mir);
       break;
     case kMirOpConstVector:
-      GenConst128(bb, mir);
+      GenConst128(mir);
       break;
     case kMirOpMoveVector:
-      GenMoveVector(bb, mir);
+      GenMoveVector(mir);
       break;
     case kMirOpPackedMultiply:
-      GenMultiplyVector(bb, mir);
+      GenMultiplyVector(mir);
       break;
     case kMirOpPackedAddition:
-      GenAddVector(bb, mir);
+      GenAddVector(mir);
       break;
     case kMirOpPackedSubtract:
-      GenSubtractVector(bb, mir);
+      GenSubtractVector(mir);
       break;
     case kMirOpPackedShiftLeft:
-      GenShiftLeftVector(bb, mir);
+      GenShiftLeftVector(mir);
       break;
     case kMirOpPackedSignedShiftRight:
-      GenSignedShiftRightVector(bb, mir);
+      GenSignedShiftRightVector(mir);
       break;
     case kMirOpPackedUnsignedShiftRight:
-      GenUnsignedShiftRightVector(bb, mir);
+      GenUnsignedShiftRightVector(mir);
       break;
     case kMirOpPackedAnd:
-      GenAndVector(bb, mir);
+      GenAndVector(mir);
       break;
     case kMirOpPackedOr:
-      GenOrVector(bb, mir);
+      GenOrVector(mir);
       break;
     case kMirOpPackedXor:
-      GenXorVector(bb, mir);
+      GenXorVector(mir);
       break;
     case kMirOpPackedAddReduce:
-      GenAddReduceVector(bb, mir);
+      GenAddReduceVector(mir);
       break;
     case kMirOpPackedReduce:
-      GenReduceVector(bb, mir);
+      GenReduceVector(mir);
       break;
     case kMirOpPackedSet:
-      GenSetVector(bb, mir);
+      GenSetVector(mir);
       break;
     case kMirOpMemBarrier:
       GenMemBarrier(static_cast<MemBarrierKind>(mir->dalvikInsn.vA));
@@ -1638,7 +1592,7 @@
   }
 }
 
-void X86Mir2Lir::GenConst128(BasicBlock* bb, MIR* mir) {
+void X86Mir2Lir::GenConst128(MIR* mir) {
   RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
   Clobber(rs_dest);
 
@@ -1689,7 +1643,7 @@
   load->target = data_target;
 }
 
-void X86Mir2Lir::GenMoveVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenMoveVector(MIR* mir) {
   // We only support 128 bit registers.
   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1805,7 +1759,7 @@
   NewLIR2(kX86PaddqRR, rs_dest_src1.GetReg(), rs_tmp_vector_1.GetReg());
 }
 
-void X86Mir2Lir::GenMultiplyVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenMultiplyVector(MIR* mir) {
   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1839,7 +1793,7 @@
   NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
 }
 
-void X86Mir2Lir::GenAddVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenAddVector(MIR* mir) {
   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1874,7 +1828,7 @@
   NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
 }
 
-void X86Mir2Lir::GenSubtractVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenSubtractVector(MIR* mir) {
   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1909,7 +1863,7 @@
   NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
 }
 
-void X86Mir2Lir::GenShiftByteVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenShiftByteVector(MIR* mir) {
   // Destination does not need clobbered because it has already been as part
   // of the general packed shift handler (caller of this method).
   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1953,7 +1907,7 @@
   AndMaskVectorRegister(rs_dest_src1, int_mask, int_mask, int_mask, int_mask);
 }
 
-void X86Mir2Lir::GenShiftLeftVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenShiftLeftVector(MIR* mir) {
   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1973,7 +1927,7 @@
       break;
     case kSignedByte:
     case kUnsignedByte:
-      GenShiftByteVector(bb, mir);
+      GenShiftByteVector(mir);
       return;
     default:
       LOG(FATAL) << "Unsupported vector shift left " << opsize;
@@ -1982,7 +1936,7 @@
   NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
 }
 
-void X86Mir2Lir::GenSignedShiftRightVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenSignedShiftRightVector(MIR* mir) {
   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1999,18 +1953,18 @@
       break;
     case kSignedByte:
     case kUnsignedByte:
-      GenShiftByteVector(bb, mir);
+      GenShiftByteVector(mir);
       return;
     case k64:
       // TODO Implement emulated shift algorithm.
     default:
       LOG(FATAL) << "Unsupported vector signed shift right " << opsize;
-      break;
+      UNREACHABLE();
   }
   NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
 }
 
-void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenUnsignedShiftRightVector(MIR* mir) {
   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2030,7 +1984,7 @@
       break;
     case kSignedByte:
     case kUnsignedByte:
-      GenShiftByteVector(bb, mir);
+      GenShiftByteVector(mir);
       return;
     default:
       LOG(FATAL) << "Unsupported vector unsigned shift right " << opsize;
@@ -2039,7 +1993,7 @@
   NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
 }
 
-void X86Mir2Lir::GenAndVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenAndVector(MIR* mir) {
   // We only support 128 bit registers.
   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2048,7 +2002,7 @@
   NewLIR2(kX86PandRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
 }
 
-void X86Mir2Lir::GenOrVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenOrVector(MIR* mir) {
   // We only support 128 bit registers.
   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2057,7 +2011,7 @@
   NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
 }
 
-void X86Mir2Lir::GenXorVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenXorVector(MIR* mir) {
   // We only support 128 bit registers.
   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2084,7 +2038,7 @@
   AppendOpcodeWithConst(opcode, rs_src1.GetReg(), const_mirp);
 }
 
-void X86Mir2Lir::GenAddReduceVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenAddReduceVector(MIR* mir) {
   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   RegStorage vector_src = RegStorage::Solo128(mir->dalvikInsn.vB);
   bool is_wide = opsize == k64 || opsize == kDouble;
@@ -2219,7 +2173,7 @@
     // except the rhs is not a VR but a physical register allocated above.
     // No load of source VR is done because it assumes that rl_result will
     // share physical register / memory location.
-    rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+    rl_result = UpdateLocTyped(rl_dest);
     if (rl_result.location == kLocPhysReg) {
       // Ensure res is in a core reg.
       rl_result = EvalLoc(rl_dest, kCoreReg, true);
@@ -2232,7 +2186,7 @@
   }
 }
 
-void X86Mir2Lir::GenReduceVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenReduceVector(MIR* mir) {
   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   RegLocation rl_dest = mir_graph_->GetDest(mir);
   RegStorage vector_src = RegStorage::Solo128(mir->dalvikInsn.vB);
@@ -2286,7 +2240,7 @@
   } else {
     int extract_index = mir->dalvikInsn.arg[0];
     int extr_opcode = 0;
-    rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+    rl_result = UpdateLocTyped(rl_dest);
 
     // Handle the rest of integral types now.
     switch (opsize) {
@@ -2302,7 +2256,7 @@
         break;
       default:
         LOG(FATAL) << "Unsupported vector reduce " << opsize;
-        return;
+        UNREACHABLE();
     }
 
     if (rl_result.location == kLocPhysReg) {
@@ -2331,7 +2285,7 @@
   }
 }
 
-void X86Mir2Lir::GenSetVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenSetVector(MIR* mir) {
   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
   RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2406,11 +2360,13 @@
   }
 }
 
-void X86Mir2Lir::GenPackedArrayGet(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenPackedArrayGet(BasicBlock* bb, MIR* mir) {
+  UNUSED(bb, mir);
   UNIMPLEMENTED(FATAL) << "Extended opcode kMirOpPackedArrayGet not supported.";
 }
 
-void X86Mir2Lir::GenPackedArrayPut(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenPackedArrayPut(BasicBlock* bb, MIR* mir) {
+  UNUSED(bb, mir);
   UNIMPLEMENTED(FATAL) << "Extended opcode kMirOpPackedArrayPut not supported.";
 }
 
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index 8d5dabc..cb9a24a 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -54,14 +54,16 @@
 }
 
 bool X86Mir2Lir::InexpensiveConstantInt(int32_t value) {
+  UNUSED(value);
   return true;
 }
 
 bool X86Mir2Lir::InexpensiveConstantFloat(int32_t value) {
-  return false;
+  return value == 0;
 }
 
 bool X86Mir2Lir::InexpensiveConstantLong(int64_t value) {
+  UNUSED(value);
   return true;
 }
 
@@ -934,13 +936,14 @@
 
 LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
                                    int offset, int check_value, LIR* target, LIR** compare) {
-    LIR* inst = NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(),
-            offset, check_value);
-    if (compare != nullptr) {
-        *compare = inst;
-    }
-    LIR* branch = OpCondBranch(cond, target);
-    return branch;
+  UNUSED(temp_reg);  // Comparison performed directly with memory.
+  LIR* inst = NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(),
+      offset, check_value);
+  if (compare != nullptr) {
+    *compare = inst;
+  }
+  LIR* branch = OpCondBranch(cond, target);
+  return branch;
 }
 
 void X86Mir2Lir::AnalyzeMIR() {
@@ -965,13 +968,13 @@
   }
 }
 
-void X86Mir2Lir::AnalyzeBB(BasicBlock * bb) {
+void X86Mir2Lir::AnalyzeBB(BasicBlock* bb) {
   if (bb->block_type == kDead) {
     // Ignore dead blocks
     return;
   }
 
-  for (MIR *mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
     int opcode = mir->dalvikInsn.opcode;
     if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
       AnalyzeExtendedMIR(opcode, bb, mir);
@@ -982,7 +985,7 @@
 }
 
 
-void X86Mir2Lir::AnalyzeExtendedMIR(int opcode, BasicBlock * bb, MIR *mir) {
+void X86Mir2Lir::AnalyzeExtendedMIR(int opcode, BasicBlock* bb, MIR* mir) {
   switch (opcode) {
     // Instructions referencing doubles.
     case kMirOpFusedCmplDouble:
@@ -1009,7 +1012,7 @@
   }
 }
 
-void X86Mir2Lir::AnalyzeMIR(int opcode, BasicBlock * bb, MIR *mir) {
+void X86Mir2Lir::AnalyzeMIR(int opcode, BasicBlock* bb, MIR* mir) {
   // Looking for
   // - Do we need a pointer to the code (used for packed switches and double lits)?
 
@@ -1046,7 +1049,8 @@
   }
 }
 
-void X86Mir2Lir::AnalyzeFPInstruction(int opcode, BasicBlock * bb, MIR *mir) {
+void X86Mir2Lir::AnalyzeFPInstruction(int opcode, BasicBlock* bb, MIR* mir) {
+  UNUSED(bb);
   // Look at all the uses, and see if they are double constants.
   uint64_t attrs = MIRGraph::GetDataFlowAttributes(static_cast<Instruction::Code>(opcode));
   int next_sreg = 0;
@@ -1080,7 +1084,7 @@
   }
 }
 
-RegLocation X86Mir2Lir::UpdateLocTyped(RegLocation loc, int reg_class) {
+RegLocation X86Mir2Lir::UpdateLocTyped(RegLocation loc) {
   loc = UpdateLoc(loc);
   if ((loc.location == kLocPhysReg) && (loc.fp != loc.reg.IsFloat())) {
     if (GetRegInfo(loc.reg)->IsTemp()) {
@@ -1094,7 +1098,7 @@
   return loc;
 }
 
-RegLocation X86Mir2Lir::UpdateLocWideTyped(RegLocation loc, int reg_class) {
+RegLocation X86Mir2Lir::UpdateLocWideTyped(RegLocation loc) {
   loc = UpdateLocWide(loc);
   if ((loc.location == kLocPhysReg) && (loc.fp != loc.reg.IsFloat())) {
     if (GetRegInfo(loc.reg)->IsTemp()) {
@@ -1108,7 +1112,8 @@
   return loc;
 }
 
-void X86Mir2Lir::AnalyzeInvokeStatic(int opcode, BasicBlock * bb, MIR *mir) {
+void X86Mir2Lir::AnalyzeInvokeStatic(int opcode, BasicBlock* bb, MIR* mir) {
+  UNUSED(opcode, bb);
   // For now this is only actual for x86-32.
   if (cu_->target64) {
     return;
@@ -1132,6 +1137,7 @@
 }
 
 LIR* X86Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+  UNUSED(r_tgt);  // Call to absolute memory location doesn't need a temporary target register.
   if (cu_->target64) {
     return OpThreadMem(op, GetThreadOffset<8>(trampoline));
   } else {
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index 22a2f30..afdc244 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -313,25 +313,6 @@
 constexpr RegStorage rs_xr14(RegStorage::kValid | xr14);
 constexpr RegStorage rs_xr15(RegStorage::kValid | xr15);
 
-extern X86NativeRegisterPool rX86_ARG0;
-extern X86NativeRegisterPool rX86_ARG1;
-extern X86NativeRegisterPool rX86_ARG2;
-extern X86NativeRegisterPool rX86_ARG3;
-extern X86NativeRegisterPool rX86_ARG4;
-extern X86NativeRegisterPool rX86_ARG5;
-extern X86NativeRegisterPool rX86_FARG0;
-extern X86NativeRegisterPool rX86_FARG1;
-extern X86NativeRegisterPool rX86_FARG2;
-extern X86NativeRegisterPool rX86_FARG3;
-extern X86NativeRegisterPool rX86_FARG4;
-extern X86NativeRegisterPool rX86_FARG5;
-extern X86NativeRegisterPool rX86_FARG6;
-extern X86NativeRegisterPool rX86_FARG7;
-extern X86NativeRegisterPool rX86_RET0;
-extern X86NativeRegisterPool rX86_RET1;
-extern X86NativeRegisterPool rX86_INVOKE_TGT;
-extern X86NativeRegisterPool rX86_COUNT;
-
 extern RegStorage rs_rX86_ARG0;
 extern RegStorage rs_rX86_ARG1;
 extern RegStorage rs_rX86_ARG2;
@@ -674,6 +655,7 @@
   kX86RepneScasw,       // repne scasw
   kX86Last
 };
+std::ostream& operator<<(std::ostream& os, const X86OpCode& rhs);
 
 /* Instruction assembly field_loc kind */
 enum X86EncodingKind {