AArch64: Enable MOVE_*, some CONST_*, CMP_*.

Includes fixes to GenArithImmOpLong, GenShiftOpLong, OpRegImm,
OpRegRegImm, OpRegRegImm64, EncodeLogicalImmediate and the fmov
encoding.

Change-Id: I8cae4f921d5150a6b8e4803ca4dee553928d1a58
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 4a0c055..fcaaba5 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -268,7 +268,7 @@
                  kFmtRegS, 4, 0, kFmtRegW, 9, 5, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
                  "fmov", "!0s, !1w", kFixupNone),
-    ENCODING_MAP(kA64Fmov2Sx, NO_VARIANTS(0x9e6f0000),
+    ENCODING_MAP(kA64Fmov2Sx, NO_VARIANTS(0x9e670000),
                  kFmtRegD, 4, 0, kFmtRegX, 9, 5, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
                  "fmov", "!0S, !1x", kFixupNone),
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index c92832e..8ecc393 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -185,6 +185,7 @@
     LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
     LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
     LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src);
+    LIR* OpRegRegImm64(OpKind op, RegStorage r_dest, RegStorage r_src1, int64_t value);
     LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
     LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
     LIR* OpTestSuspend(LIR* target);
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index 1ad0435..a18cc82 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -77,10 +77,10 @@
   default:
     LOG(FATAL) << "Unexpected case: " << opcode;
   }
-  rl_shift = LoadValueWide(rl_shift, kCoreReg);
+  rl_shift = LoadValue(rl_shift, kCoreReg);
   rl_src1 = LoadValueWide(rl_src1, kCoreReg);
   RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-  OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_shift.reg);
+  OpRegRegReg(op, rl_result.reg, rl_src1.reg, As64BitReg(rl_shift.reg));
   StoreValueWide(rl_dest, rl_result);
 }
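
The shift count of a long shift is a 32-bit Dalvik value, so it is now
loaded with LoadValue rather than LoadValueWide, and As64BitReg renames the
W register to its X view because the 64-bit variable-shift instructions
encode all three operands as X registers. The rename is safe even if the
upper half of the renamed register holds stale bits: LSLV/LSRV/ASRV take
the shift amount modulo the register width, so only the low six bits are
read. A minimal emulation of that semantics (mine, not ART code):

    #include <cstdint>

    // A64 LSLV: shift amount = UInt(Xm) MOD 64; only the low six bits
    // matter, and those live in the W half of the register.
    uint64_t Lslv64(uint64_t rn, uint64_t rm) {
      return rn << (rm & 63);
    }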
 
@@ -1021,7 +1021,7 @@
 
   rl_src1 = LoadValueWide(rl_src1, kCoreReg);
   RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-  OpRegRegImm(op, rl_result.reg, rl_src1.reg, val);
+  OpRegRegImm64(op, rl_result.reg, rl_src1.reg, val);
   StoreValueWide(rl_dest, rl_result);
 }
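
The wide immediate here comes from a const-wide operand, so funnelling it
through the int-typed OpRegRegImm silently dropped the upper 32 bits;
OpRegRegImm64 preserves the full value. A stand-alone illustration of the
truncation (not compiler code):

    #include <cstdint>
    #include <cassert>

    int main() {
      int64_t val = INT64_C(0x123400000000);
      int truncated = static_cast<int>(val);  // what the int parameter saw
      assert(truncated == 0);                 // the interesting bits are gone
      return 0;
    }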
 
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index 954360d..bb8b7e3 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -146,7 +146,7 @@
 
 static int CountSetBits(bool is_wide, uint64_t value) {
   return ((is_wide) ?
-          __builtin_popcountl(value) : __builtin_popcount((uint32_t)value));
+          __builtin_popcountll(value) : __builtin_popcount((uint32_t)value));
 }
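
__builtin_popcountl takes an unsigned long, which is only 32 bits wide on
an ILP32 host, so a 32-bit build of the compiler silently dropped the upper
half of a wide value; __builtin_popcountll takes an unsigned long long,
which is at least 64 bits everywhere. A quick stand-alone check (mine, not
ART code):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t v = UINT64_C(0xffffffff00000000);
      // On an ILP32 host the "l" variant would see only the low word and
      // return 0; the "ll" variant counts all 64 bits.
      printf("%d\n", __builtin_popcountll(v));  // prints 32
      return 0;
    }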
 
 /**
@@ -552,8 +552,11 @@
   return OpRegRegRegShift(op, r_dest, r_src1, r_src2, ENCODE_NO_SHIFT);
 }
 
-// Should be taking an int64_t value ?
 LIR* Arm64Mir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) {
+  return OpRegRegImm64(op, r_dest, r_src1, static_cast<int64_t>(value));
+}
+
+LIR* Arm64Mir2Lir::OpRegRegImm64(OpKind op, RegStorage r_dest, RegStorage r_src1, int64_t value) {
   LIR* res;
   bool neg = (value < 0);
   int64_t abs_value = (neg) ? -value : value;
@@ -637,11 +640,17 @@
     return NewLIR3(opcode | wide, r_dest.GetReg(), r_src1.GetReg(), log_imm);
   } else {
-    RegStorage r_scratch = AllocTemp();
-    LoadConstant(r_scratch, value);
+    RegStorage r_scratch;
+    if (IS_WIDE(wide)) {
+      r_scratch = AllocTempWide();
+      LoadConstantWide(r_scratch, value);
+    } else {
+      r_scratch = AllocTemp();
+      LoadConstant(r_scratch, value);
+    }
     if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
-      res = NewLIR4(alt_opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg(), 0);
+      res = NewLIR4(alt_opcode | wide, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg(), 0);
     else
-      res = NewLIR3(alt_opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
+      res = NewLIR3(alt_opcode | wide, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
     FreeTemp(r_scratch);
     return res;
   }
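
Both fallback NewLIR calls now OR in the wide bit; without it the scratch
path emitted the W form of the ALU operation for wide operands, computing
the result modulo 2^32. A stand-alone illustration of the difference (not
compiler code):

    #include <cstdint>
    #include <cassert>

    int main() {
      uint64_t a = UINT64_C(0xffffffff);
      uint32_t w_form = static_cast<uint32_t>(a) + 1u;  // like "add w0, w1, #1"
      uint64_t x_form = a + 1u;                         // like "add x0, x1, #1"
      assert(w_form == 0u);                             // wrapped at 32 bits
      assert(x_form == UINT64_C(0x100000000));
      return 0;
    }
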
@@ -666,9 +675,36 @@
     // abs_value is a shifted 12-bit immediate.
     shift = true;
     abs_value >>= 12;
+  } else if (LIKELY(abs_value < 0x1000000 && (op == kOpAdd || op == kOpSub))) {
+    // Note: It is better to use two ADD/SUB instructions than to load the value into a temp register.
+    // This works for both normal registers and SP.
+    // For a frame size == 0x2468, it will be encoded as:
+    //   sub sp, #0x2000
+    //   sub sp, #0x468
+    if (neg) {
+      op = (op == kOpAdd) ? kOpSub : kOpAdd;
+    }
+    OpRegImm64(op, r_dest_src1, abs_value & (~INT64_C(0xfff)));
+    return OpRegImm64(op, r_dest_src1, abs_value & 0xfff);
+  } else if (LIKELY(A64_REG_IS_SP(r_dest_src1.GetReg()) && (op == kOpAdd || op == kOpSub))) {
+    // Note: "sub sp, sp, Xm" is not correct on arm64.
+    // We need special instructions for SP.
+    // Operations on the 32-bit SP should also be avoided.
+    DCHECK(IS_WIDE(wide));
+    RegStorage r_tmp = AllocTempWide();
+    OpRegRegImm(kOpAdd, r_tmp, r_dest_src1, 0);
+    OpRegImm64(op, r_tmp, value);
+    return OpRegRegImm(kOpAdd, r_dest_src1, r_tmp, 0);
   } else {
-    RegStorage r_tmp = AllocTemp();
-    LIR* res = LoadConstant(r_tmp, value);
+    RegStorage r_tmp;
+    LIR* res;
+    if (IS_WIDE(wide)) {
+      r_tmp = AllocTempWide();
+      res = LoadConstantWide(r_tmp, value);
+    } else {
+      r_tmp = AllocTemp();
+      res = LoadConstant(r_tmp, value);
+    }
     OpRegReg(op, r_dest_src1, r_tmp);
     FreeTemp(r_tmp);
     return res;
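
The two-instruction path above relies on the A64 add/sub immediate form,
which encodes a 12-bit immediate optionally shifted left by 12 bits, so any
magnitude below 2^24 splits into one shifted and one plain chunk. A
stand-alone check of the split used above (mine, not ART code):

    #include <cstdint>
    #include <cassert>

    int main() {
      int64_t value = 0x2468;                  // the frame-size example above
      int64_t hi = value & ~INT64_C(0xfff);    // 0x2000: 12-bit imm, LSL #12
      int64_t lo = value & INT64_C(0xfff);     // 0x468: plain 12-bit imm
      assert(hi + lo == value);
      assert((hi >> 12) < 0x1000 && lo < 0x1000);
      return 0;
    }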