Merge "Clean up usage of carry flag condition codes."
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index 35d04ae..eb9ae06 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -203,10 +203,10 @@
 enum ConditionCode {
   kCondEq,  // equal
   kCondNe,  // not equal
-  kCondCs,  // carry set (unsigned less than)
-  kCondUlt = kCondCs,
-  kCondCc,  // carry clear (unsigned greater than or same)
-  kCondUge = kCondCc,
+  kCondCs,  // carry set
+  kCondCc,  // carry clear
+  kCondUlt, // unsigned less than
+  kCondUge, // unsigned greater than or same
   kCondMi,  // minus
   kCondPl,  // plus, positive or zero
   kCondVs,  // overflow
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 8226b24..661050f 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -608,7 +608,7 @@
   }
   if (!skip_overflow_check) {
     OpRegRegImm(kOpSub, rARM_LR, rARM_SP, frame_size_ - (spill_count * 4));
-    GenRegRegCheck(kCondCc, rARM_LR, r12, kThrowStackOverflow);
+    GenRegRegCheck(kCondUlt, rARM_LR, r12, kThrowStackOverflow);
     OpRegCopy(rARM_SP, rARM_LR);     // Establish stack
   } else {
     OpRegImm(kOpSub, rARM_SP, frame_size_ - (spill_count * 4));
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
index dc2e0d0..8af9cdd 100644
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -216,7 +216,7 @@
       break;
     case kCondGe:
       if (gt_bias) {
-        ccode = kCondCs;
+        ccode = kCondUge;
       }
       break;
     default:
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index d5173b0..86ae75e 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -128,31 +128,23 @@
   int32_t low_reg = rl_src1.low_reg;
   int32_t high_reg = rl_src1.high_reg;
 
+  if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
+    int t_reg = AllocTemp();
+    NewLIR4(kThumb2OrrRRRs, t_reg, low_reg, high_reg, 0);
+    FreeTemp(t_reg);
+    OpCondBranch(ccode, taken);
+    return;
+  }
+
   switch (ccode) {
     case kCondEq:
     case kCondNe:
-      LIR* target;
-      ConditionCode condition;
-      if (ccode == kCondEq) {
-        target = not_taken;
-        condition = kCondEq;
-      } else {
-        target = taken;
-        condition = kCondNe;
-      }
-      if (val == 0) {
-        int t_reg = AllocTemp();
-        NewLIR4(kThumb2OrrRRRs, t_reg, low_reg, high_reg, 0);
-        FreeTemp(t_reg);
-        OpCondBranch(condition, taken);
-        return;
-      }
-      OpCmpImmBranch(kCondNe, high_reg, val_hi, target);
+      OpCmpImmBranch(kCondNe, high_reg, val_hi, (ccode == kCondEq) ? not_taken : taken);
       break;
     case kCondLt:
       OpCmpImmBranch(kCondLt, high_reg, val_hi, taken);
       OpCmpImmBranch(kCondGt, high_reg, val_hi, not_taken);
-      ccode = kCondCc;
+      ccode = kCondUlt;
       break;
     case kCondLe:
       OpCmpImmBranch(kCondLt, high_reg, val_hi, taken);
@@ -167,7 +159,7 @@
     case kCondGe:
       OpCmpImmBranch(kCondGt, high_reg, val_hi, taken);
       OpCmpImmBranch(kCondLt, high_reg, val_hi, not_taken);
-      ccode = kCondCs;
+      ccode = kCondUge;
       break;
     default:
       LOG(FATAL) << "Unexpected ccode: " << ccode;
@@ -187,7 +179,7 @@
     rl_result = EvalLoc(rl_dest, kCoreReg, true);
     if ((true_val == 1) && (false_val == 0)) {
       OpRegRegImm(kOpRsub, rl_result.low_reg, rl_src.low_reg, 1);
-      OpIT(kCondCc, "");
+      OpIT(kCondUlt, "");
       LoadConstant(rl_result.low_reg, 0);
       GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact
     } else if (InexpensiveConstantInt(true_val) && InexpensiveConstantInt(false_val)) {
@@ -238,9 +230,7 @@
   // Normalize such that if either operand is constant, src2 will be constant.
   ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
   if (rl_src1.is_const) {
-    RegLocation rl_temp = rl_src1;
-    rl_src1 = rl_src2;
-    rl_src2 = rl_temp;
+    std::swap(rl_src1, rl_src2);
     ccode = FlipComparisonOrder(ccode);
   }
   if (rl_src2.is_const) {
@@ -268,7 +258,7 @@
     case kCondLt:
       OpCondBranch(kCondLt, taken);
       OpCondBranch(kCondGt, not_taken);
-      ccode = kCondCc;
+      ccode = kCondUlt;
       break;
     case kCondLe:
       OpCondBranch(kCondLt, taken);
@@ -283,7 +273,7 @@
     case kCondGe:
       OpCondBranch(kCondGt, taken);
       OpCondBranch(kCondLt, not_taken);
-      ccode = kCondCs;
+      ccode = kCondUge;
       break;
     default:
       LOG(FATAL) << "Unexpected ccode: " << ccode;
@@ -701,7 +691,7 @@
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   OpRegRegImm(kOpRsub, rl_result.low_reg, r_tmp, 1);
   DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
-  OpIT(kCondCc, "");
+  OpIT(kCondUlt, "");
   LoadConstant(rl_result.low_reg, 0); /* cc */
   FreeTemp(r_tmp);  // Now unneeded.
 
@@ -981,9 +971,7 @@
     rl_result = EvalLoc(rl_dest, reg_class, true);
 
     if (needs_range_check) {
-      // TODO: change kCondCS to a more meaningful name, is the sense of
-      // carry-set/clear flipped?
-      GenRegRegCheck(kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+      GenRegRegCheck(kCondUge, rl_index.low_reg, reg_len, kThrowArrayBounds);
       FreeTemp(reg_len);
     }
     LoadBaseIndexed(reg_ptr, rl_index.low_reg, rl_result.low_reg, scale, size);
@@ -1072,7 +1060,7 @@
     OpRegRegImm(kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
     rl_src = LoadValue(rl_src, reg_class);
     if (needs_range_check) {
-      GenRegRegCheck(kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+      GenRegRegCheck(kCondUge, rl_index.low_reg, reg_len, kThrowArrayBounds);
       FreeTemp(reg_len);
     }
     StoreBaseIndexed(reg_ptr, rl_index.low_reg, rl_src.low_reg,
@@ -1172,9 +1160,7 @@
     // Normalize
     if (!rl_src2.is_const) {
       DCHECK(rl_src1.is_const);
-      RegLocation rl_temp = rl_src1;
-      rl_src1 = rl_src2;
-      rl_src2 = rl_temp;
+      std::swap(rl_src1, rl_src2);
     }
   }
   if (BadOverlap(rl_src1, rl_dest)) {
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 48c9af5..d80ae3b 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -207,6 +207,8 @@
     case kCondNe: res = kArmCondNe; break;
     case kCondCs: res = kArmCondCs; break;
     case kCondCc: res = kArmCondCc; break;
+    case kCondUlt: res = kArmCondCc; break;
+    case kCondUge: res = kArmCondCs; break;
     case kCondMi: res = kArmCondMi; break;
     case kCondPl: res = kArmCondPl; break;
     case kCondVs: res = kArmCondVs; break;
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 8a8b168..fa05d6c 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -210,6 +210,9 @@
 }
 
 LIR* ArmMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
+  // Use kThumb2BCond instead of kThumbBCond for performance reasons: if a kThumbBCond
+  // later needs to be fixed up to kThumb2BCond, the assembler must run an additional
+  // pass, and that extra assembly time is substantial.
   LIR* branch = NewLIR2(kThumb2BCond, 0 /* offset to be patched */,
                         ArmConditionEncoding(cc));
   branch->target = target;
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 82a1932..d942a24 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -896,7 +896,7 @@
       intrinsic_launchpads_.Insert(launch_pad);
       OpRegReg(kOpCmp, rl_idx.low_reg, reg_max);
       FreeTemp(reg_max);
-      OpCondBranch(kCondCs, launch_pad);
+      OpCondBranch(kCondUge, launch_pad);
     }
   } else {
     if (range_check) {
@@ -907,7 +907,7 @@
       intrinsic_launchpads_.Insert(launch_pad);
       OpRegReg(kOpCmp, rl_idx.low_reg, reg_max);
       FreeTemp(reg_max);
-      OpCondBranch(kCondCc, launch_pad);
+      OpCondBranch(kCondUge, launch_pad);
     }
     reg_off = AllocTemp();
     reg_ptr = AllocTemp();
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 14f49aa..2e385a3 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -318,7 +318,7 @@
   DCHECK_EQ(num_fp_spills_, 0);
   if (!skip_overflow_check) {
     OpRegRegImm(kOpSub, new_sp, rMIPS_SP, frame_size_ - (spill_count * 4));
-    GenRegRegCheck(kCondCc, new_sp, check_reg, kThrowStackOverflow);
+    GenRegRegCheck(kCondUlt, new_sp, check_reg, kThrowStackOverflow);
     OpRegCopy(rMIPS_SP, new_sp);     // Establish stack
   } else {
     OpRegImm(kOpSub, rMIPS_SP, frame_size_ - (spill_count * 4));
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index dfff260..180d56c 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -77,11 +77,11 @@
       br_op = kMipsBne;
       cmp_zero = true;
       break;
-    case kCondCc:
+    case kCondUlt:
       slt_op = kMipsSltu;
       br_op = kMipsBnez;
       break;
-    case kCondCs:
+    case kCondUge:
       slt_op = kMipsSltu;
       br_op = kMipsBeqz;
       break;
@@ -485,9 +485,7 @@
     rl_result = EvalLoc(rl_dest, reg_class, true);
 
     if (needs_range_check) {
-      // TODO: change kCondCS to a more meaningful name, is the sense of
-      // carry-set/clear flipped?
-      GenRegRegCheck(kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+      GenRegRegCheck(kCondUge, rl_index.low_reg, reg_len, kThrowArrayBounds);
       FreeTemp(reg_len);
     }
     LoadBaseDispWide(reg_ptr, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
@@ -498,9 +496,7 @@
     rl_result = EvalLoc(rl_dest, reg_class, true);
 
     if (needs_range_check) {
-      // TODO: change kCondCS to a more meaningful name, is the sense of
-      // carry-set/clear flipped?
-      GenRegRegCheck(kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+      GenRegRegCheck(kCondUge, rl_index.low_reg, reg_len, kThrowArrayBounds);
       FreeTemp(reg_len);
     }
     LoadBaseIndexed(reg_ptr, rl_index.low_reg, rl_result.low_reg, scale, size);
@@ -566,7 +562,7 @@
     rl_src = LoadValueWide(rl_src, reg_class);
 
     if (needs_range_check) {
-      GenRegRegCheck(kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+      GenRegRegCheck(kCondUge, rl_index.low_reg, reg_len, kThrowArrayBounds);
       FreeTemp(reg_len);
     }
 
@@ -574,7 +570,7 @@
   } else {
     rl_src = LoadValue(rl_src, reg_class);
     if (needs_range_check) {
-      GenRegRegCheck(kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+      GenRegRegCheck(kCondUge, rl_index.low_reg, reg_len, kThrowArrayBounds);
       FreeTemp(reg_len);
     }
     StoreBaseIndexed(reg_ptr, rl_index.low_reg, rl_src.low_reg,
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 1731703..6272498 100644
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -322,7 +322,7 @@
         branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
         branch->target = not_taken;
       }
-      ccode = kCondCs;
+      ccode = kCondUlt;
       break;
     case kCondLe:
       if (gt_bias) {
@@ -343,7 +343,7 @@
         branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
         branch->target = taken;
       }
-      ccode = kCondCc;
+      ccode = kCondUge;
       break;
     default:
       LOG(FATAL) << "Unexpected ccode: " << ccode;
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 56cf7e9..2c646d4 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -83,6 +83,8 @@
     case kCondNe: return kX86CondNe;
     case kCondCs: return kX86CondC;
     case kCondCc: return kX86CondNc;
+    case kCondUlt: return kX86CondC;
+    case kCondUge: return kX86CondNc;
     case kCondMi: return kX86CondS;
     case kCondPl: return kX86CondNs;
     case kCondVs: return kX86CondO;