Use (D)CHECK_ALIGNED more.
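
The hand-rolled checks DCHECK_EQ((x & 0x3), 0) / CHECK(IsAligned<n>(x))
assert the same condition as DCHECK_ALIGNED(x, 4) / CHECK_ALIGNED(x, n),
but the macro names the intent and the required alignment directly. For a
power-of-two alignment n the two spellings are equivalent: x & (n - 1) == 0
exactly when x is a multiple of n. A minimal sketch of the semantics,
assuming an IsAligned<> helper along the lines of art's
runtime/base/macros.h (simplified: the real macros forward to
CHECK()/DCHECK() rather than assert()):

    #include <cassert>
    #include <cstdint>

    // Hypothetical, simplified stand-ins for art's alignment helpers.
    template <int n, typename T>
    constexpr bool IsAligned(T x) {
      static_assert((n & (n - 1)) == 0, "n is not a power of two");
      return (x & (n - 1)) == 0;  // same test as the old DCHECK_EQ form
    }
    #define DCHECK_ALIGNED(value, alignment) assert(IsAligned<alignment>(value))

    int main() {
      int32_t displacement = 8;
      DCHECK_ALIGNED(displacement, 4);  // passes: 8 & 3 == 0
      DCHECK_ALIGNED(displacement, 2);  // passes: 8 & 1 == 0
      return 0;
    }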

Change-Id: I9d740f6a88d01e028d4ddc3e4e62b0a73ea050af
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index df4a9f2..5f911db 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -1298,7 +1298,7 @@
              */
             delta &= ~0x3;
           }
-          DCHECK_EQ((delta & 0x3), 0);
+          DCHECK_ALIGNED(delta, 4);
           // First, a sanity check for cases we shouldn't see now
           if (kIsDebugBuild && (((lir->opcode == kThumbAddPcRel) && (delta > 1020)) ||
               ((lir->opcode == kThumbLdrPcRel) && (delta > 1020)))) {
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 2ef92f8..062f7af 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -880,7 +880,7 @@
 LIR* ArmMir2Lir::LoadStoreUsingInsnWithOffsetImm8Shl2(ArmOpcode opcode, RegStorage r_base,
                                                       int displacement, RegStorage r_src_dest,
                                                       RegStorage r_work) {
-  DCHECK_EQ(displacement & 3, 0);
+  DCHECK_ALIGNED(displacement, 4);
   constexpr int kOffsetMask = 0xff << 2;
   int encoded_disp = (displacement & kOffsetMask) >> 2;  // Within range of the instruction.
   RegStorage r_ptr = r_base;
@@ -942,7 +942,7 @@
         already_generated = true;
         break;
       }
-      DCHECK_EQ((displacement & 0x3), 0);
+      DCHECK_ALIGNED(displacement, 4);
       scale = 2;
       if (r_dest.Low8() && (r_base == rs_rARM_PC) && (displacement <= 1020) &&
           (displacement >= 0)) {
@@ -959,14 +959,14 @@
       }
       break;
     case kUnsignedHalf:
-      DCHECK_EQ((displacement & 0x1), 0);
+      DCHECK_ALIGNED(displacement, 2);
       scale = 1;
       short_form = all_low && (displacement >> (5 + scale)) == 0;
       opcode16 = kThumbLdrhRRI5;
       opcode32 = kThumb2LdrhRRI12;
       break;
     case kSignedHalf:
-      DCHECK_EQ((displacement & 0x1), 0);
+      DCHECK_ALIGNED(displacement, 2);
       scale = 1;
       DCHECK_EQ(opcode16, kThumbBkpt);  // Not available.
       opcode32 = kThumb2LdrshRRI12;
@@ -1096,7 +1096,7 @@
         already_generated = true;
         break;
       }
-      DCHECK_EQ((displacement & 0x3), 0);
+      DCHECK_ALIGNED(displacement, 4);
       scale = 2;
       if (r_src.Low8() && (r_base == rs_r13sp) && (displacement <= 1020) && (displacement >= 0)) {
         short_form = true;
@@ -1109,7 +1109,7 @@
       break;
     case kUnsignedHalf:
     case kSignedHalf:
-      DCHECK_EQ((displacement & 0x1), 0);
+      DCHECK_ALIGNED(displacement, 2);
       scale = 1;
       short_form = all_low && (displacement >> (5 + scale)) == 0;
       opcode16 = kThumbStrhRRI5;
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index b78fb80..25c69d1 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -909,7 +909,7 @@
           CodeOffset target = target_lir->offset +
               ((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
           int32_t delta = target - pc;
-          DCHECK_EQ(delta & 0x3, 0);
+          DCHECK_ALIGNED(delta, 4);
           if (!IS_SIGNED_IMM26(delta >> 2)) {
             LOG(FATAL) << "Invalid jump range in kFixupT1Branch";
           }
@@ -933,7 +933,7 @@
           CodeOffset target = target_lir->offset +
             ((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
           int32_t delta = target - pc;
-          DCHECK_EQ(delta & 0x3, 0);
+          DCHECK_ALIGNED(delta, 4);
           if (!IS_SIGNED_IMM19(delta >> 2)) {
             LOG(FATAL) << "Invalid jump range in kFixupLoad";
           }
@@ -965,7 +965,7 @@
           CodeOffset target = target_lir->offset +
               ((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
           int32_t delta = target - pc;
-          DCHECK_EQ(delta & 0x3, 0);
+          DCHECK_ALIGNED(delta, 4);
           // Check if branch offset can be encoded in tbz/tbnz.
           if (!IS_SIGNED_IMM14(delta >> 2)) {
             DexOffset dalvik_offset = lir->dalvik_offset;
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 37e5804..ec2475a 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -714,7 +714,7 @@
         } else {
           opcode = kMipsFldc1;
         }
-        DCHECK_EQ((displacement & 0x3), 0);
+        DCHECK_ALIGNED(displacement, 4);
         break;
       }
       is64bit = true;
@@ -736,15 +736,15 @@
           DCHECK(r_dest.IsDouble());
         }
       }
-      DCHECK_EQ((displacement & 0x3), 0);
+      DCHECK_ALIGNED(displacement, 4);
       break;
     case kUnsignedHalf:
       opcode = kMipsLhu;
-      DCHECK_EQ((displacement & 0x1), 0);
+      DCHECK_ALIGNED(displacement, 2);
       break;
     case kSignedHalf:
       opcode = kMipsLh;
-      DCHECK_EQ((displacement & 0x1), 0);
+      DCHECK_ALIGNED(displacement, 2);
       break;
     case kUnsignedByte:
       opcode = kMipsLbu;
@@ -891,7 +891,7 @@
         } else {
           opcode = kMipsFsdc1;
         }
-        DCHECK_EQ((displacement & 0x3), 0);
+        DCHECK_ALIGNED(displacement, 4);
         break;
       }
       is64bit = true;
@@ -913,12 +913,12 @@
           DCHECK(r_src.IsDouble());
         }
       }
-      DCHECK_EQ((displacement & 0x3), 0);
+      DCHECK_ALIGNED(displacement, 4);
       break;
     case kUnsignedHalf:
     case kSignedHalf:
       opcode = kMipsSh;
-      DCHECK_EQ((displacement & 0x1), 0);
+      DCHECK_ALIGNED(displacement, 2);
       break;
     case kUnsignedByte:
     case kSignedByte:
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index 61a1bec..b16ae98 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -659,7 +659,7 @@
         opcode = is_array ? kX86Mov32RA  : kX86Mov32RM;
       }
       // TODO: double store is to unaligned address
-      DCHECK_EQ((displacement & 0x3), 0);
+      DCHECK_ALIGNED(displacement, 4);
       break;
     case kWord:
       if (cu_->target64) {
@@ -677,15 +677,15 @@
         opcode = is_array ? kX86MovssRA : kX86MovssRM;
         DCHECK(r_dest.IsFloat());
       }
-      DCHECK_EQ((displacement & 0x3), 0);
+      DCHECK_ALIGNED(displacement, 4);
       break;
     case kUnsignedHalf:
       opcode = is_array ? kX86Movzx16RA : kX86Movzx16RM;
-      DCHECK_EQ((displacement & 0x1), 0);
+      DCHECK_ALIGNED(displacement, 2);
       break;
     case kSignedHalf:
       opcode = is_array ? kX86Movsx16RA : kX86Movsx16RM;
-      DCHECK_EQ((displacement & 0x1), 0);
+      DCHECK_ALIGNED(displacement, 2);
       break;
     case kUnsignedByte:
       opcode = is_array ? kX86Movzx8RA : kX86Movzx8RM;
@@ -812,7 +812,7 @@
         opcode = is_array ? kX86Mov32AR  : kX86Mov32MR;
       }
       // TODO: double store is to unaligned address
-      DCHECK_EQ((displacement & 0x3), 0);
+      DCHECK_ALIGNED(displacement, 4);
       break;
     case kWord:
       if (cu_->target64) {
@@ -831,13 +831,13 @@
         opcode = is_array ? kX86MovssAR : kX86MovssMR;
         DCHECK(r_src.IsSingle());
       }
-      DCHECK_EQ((displacement & 0x3), 0);
+      DCHECK_ALIGNED(displacement, 4);
       consider_non_temporal = true;
       break;
     case kUnsignedHalf:
     case kSignedHalf:
       opcode = is_array ? kX86Mov16AR : kX86Mov16MR;
-      DCHECK_EQ((displacement & 0x1), 0);
+      DCHECK_ALIGNED(displacement, 2);
       break;
     case kUnsignedByte:
     case kSignedByte:
diff --git a/compiler/linker/arm/relative_patcher_thumb2_test.cc b/compiler/linker/arm/relative_patcher_thumb2_test.cc
index b4aa286..13f67e6 100644
--- a/compiler/linker/arm/relative_patcher_thumb2_test.cc
+++ b/compiler/linker/arm/relative_patcher_thumb2_test.cc
@@ -50,7 +50,7 @@
 
     // We want to put the method3 at a very precise offset.
     const uint32_t method3_offset = method1_offset + distance_without_thunks;
-    CHECK(IsAligned<kArmAlignment>(method3_offset - sizeof(OatQuickMethodHeader)));
+    CHECK_ALIGNED(method3_offset - sizeof(OatQuickMethodHeader), kArmAlignment);
 
     // Calculate size of method2 so that we put method3 at the correct place.
     const uint32_t method2_offset =
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
index 29355d6..6b9c530 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -108,7 +108,7 @@
     if (!current_method_thunks_.empty()) {
       uint32_t aligned_offset = CompiledMethod::AlignCode(offset, kArm64);
       if (kIsDebugBuild) {
-        CHECK(IsAligned<kAdrpThunkSize>(current_method_thunks_.size()));
+        CHECK_ALIGNED(current_method_thunks_.size(), kAdrpThunkSize);
         size_t num_thunks = current_method_thunks_.size() / kAdrpThunkSize;
         CHECK_LE(num_thunks, processed_adrp_thunks_);
         for (size_t i = 0u; i != num_thunks; ++i) {
@@ -203,7 +203,7 @@
       if ((adrp & 0x9f000000u) != 0x90000000u) {
         CHECK(fix_cortex_a53_843419_);
         CHECK_EQ(adrp & 0xfc000000u, 0x14000000u);  // B <thunk>
-        CHECK(IsAligned<kAdrpThunkSize>(current_method_thunks_.size()));
+        CHECK_ALIGNED(current_method_thunks_.size(), kAdrpThunkSize);
         size_t num_thunks = current_method_thunks_.size() / kAdrpThunkSize;
         CHECK_LE(num_thunks, processed_adrp_thunks_);
         uint32_t b_offset = patch_offset - literal_offset + pc_insn_offset;
diff --git a/compiler/linker/arm64/relative_patcher_arm64_test.cc b/compiler/linker/arm64/relative_patcher_arm64_test.cc
index 1bad8a9..b3af4c6 100644
--- a/compiler/linker/arm64/relative_patcher_arm64_test.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64_test.cc
@@ -66,7 +66,7 @@
     // We want to put the method3 at a very precise offset.
     const uint32_t last_method_offset = method1_offset + distance_without_thunks;
     const uint32_t gap_end = last_method_offset - sizeof(OatQuickMethodHeader);
-    CHECK(IsAligned<kArm64Alignment>(gap_end));
+    CHECK_ALIGNED(gap_end, kArm64Alignment);
 
     // Fill the gap with intermediate methods in chunks of 2MiB and the last in [2MiB, 4MiB).
     // (This allows deduplicating the small chunks to avoid using 256MiB of memory for +-128MiB
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index 09d2270..0e3e08c 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -252,11 +252,11 @@
   if (offset_ < 0) {
     int32_t off = -offset_;
     CHECK_LT(off, 1024);
-    CHECK_EQ((off & 3 /* 0b11 */), 0);    // Must be multiple of 4.
+    CHECK_ALIGNED(off, 4);
     encoding = (am ^ (1 << kUShift)) | off >> 2;  // Flip U to adjust sign.
   } else {
     CHECK_LT(offset_, 1024);
-    CHECK_EQ((offset_ & 3 /* 0b11 */), 0);    // Must be multiple of 4.
+    CHECK_ALIGNED(offset_, 4);
     encoding =  am | offset_ >> 2;
   }
   encoding |= static_cast<uint32_t>(rn_) << 16;
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 88b2f2c..5843886 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -101,7 +101,7 @@
   }
 
   // Adjust literal pool labels for padding.
-  DCHECK_EQ(current_code_size & 1u, 0u);
+  DCHECK_ALIGNED(current_code_size, 2);
   uint32_t literals_adjustment = current_code_size + (current_code_size & 2) - buffer_.Size();
   if (literals_adjustment != 0u) {
     for (Literal& literal : literals_) {
@@ -152,7 +152,7 @@
     // Load literal instructions (LDR, LDRD, VLDR) require 4-byte alignment.
     // We don't support byte and half-word literals.
     uint32_t code_size = buffer_.Size();
-    DCHECK_EQ(code_size & 1u, 0u);
+    DCHECK_ALIGNED(code_size, 2);
     if ((code_size & 2u) != 0u) {
       Emit16(0);
     }
@@ -168,7 +168,7 @@
 }
 
 inline int16_t Thumb2Assembler::BEncoding16(int32_t offset, Condition cond) {
-  DCHECK_EQ(offset & 1, 0);
+  DCHECK_ALIGNED(offset, 2);
   int16_t encoding = B15 | B14;
   if (cond != AL) {
     DCHECK(IsInt<9>(offset));
@@ -181,7 +181,7 @@
 }
 
 inline int32_t Thumb2Assembler::BEncoding32(int32_t offset, Condition cond) {
-  DCHECK_EQ(offset & 1, 0);
+  DCHECK_ALIGNED(offset, 2);
   int32_t s = (offset >> 31) & 1;   // Sign bit.
   int32_t encoding = B31 | B30 | B29 | B28 | B15 |
       (s << 26) |                   // Sign bit goes to bit 26.
@@ -205,7 +205,7 @@
 
 inline int16_t Thumb2Assembler::CbxzEncoding16(Register rn, int32_t offset, Condition cond) {
   DCHECK(!IsHighRegister(rn));
-  DCHECK_EQ(offset & 1, 0);
+  DCHECK_ALIGNED(offset, 2);
   DCHECK(IsUint<7>(offset));
   DCHECK(cond == EQ || cond == NE);
   return B15 | B13 | B12 | B8 | (cond == NE ? B11 : 0) | static_cast<int32_t>(rn) |
@@ -250,7 +250,7 @@
 
 inline int16_t Thumb2Assembler::LdrLitEncoding16(Register rt, int32_t offset) {
   DCHECK(!IsHighRegister(rt));
-  DCHECK_EQ(offset & 3, 0);
+  DCHECK_ALIGNED(offset, 4);
   DCHECK(IsUint<10>(offset));
   return B14 | B11 | (static_cast<int32_t>(rt) << 8) | (offset >> 2);
 }
@@ -261,7 +261,7 @@
 }
 
 inline int32_t Thumb2Assembler::LdrdEncoding32(Register rt, Register rt2, Register rn, int32_t offset) {
-  DCHECK_EQ(offset & 3, 0);
+  DCHECK_ALIGNED(offset, 4);
   CHECK(IsUint<10>(offset));
   return B31 | B30 | B29 | B27 |
       B24 /* P = 1 */ | B23 /* U = 1 */ | B22 | 0 /* W = 0 */ | B20 |
@@ -270,7 +270,7 @@
 }
 
 inline int32_t Thumb2Assembler::VldrsEncoding32(SRegister sd, Register rn, int32_t offset) {
-  DCHECK_EQ(offset & 3, 0);
+  DCHECK_ALIGNED(offset, 4);
   CHECK(IsUint<10>(offset));
   return B31 | B30 | B29 | B27 | B26 | B24 |
       B23 /* U = 1 */ | B20 | B11 | B9 |
@@ -281,7 +281,7 @@
 }
 
 inline int32_t Thumb2Assembler::VldrdEncoding32(DRegister dd, Register rn, int32_t offset) {
-  DCHECK_EQ(offset & 3, 0);
+  DCHECK_ALIGNED(offset, 4);
   CHECK(IsUint<10>(offset));
   return B31 | B30 | B29 | B27 | B26 | B24 |
       B23 /* U = 1 */ | B20 | B11 | B9 | B8 |
@@ -294,7 +294,7 @@
 inline int16_t Thumb2Assembler::LdrRtRnImm5Encoding16(Register rt, Register rn, int32_t offset) {
   DCHECK(!IsHighRegister(rt));
   DCHECK(!IsHighRegister(rn));
-  DCHECK_EQ(offset & 3, 0);
+  DCHECK_ALIGNED(offset, 4);
   DCHECK(IsUint<7>(offset));
   return B14 | B13 | B11 |
       (static_cast<int32_t>(rn) << 3) | static_cast<int32_t>(rt) |
@@ -1423,7 +1423,7 @@
           thumb_opcode = 3U /* 0b11 */;
           opcode_shift = 12;
           CHECK_LT(immediate, (1u << 9));
-          CHECK_EQ((immediate & 3u /* 0b11 */), 0u);
+          CHECK_ALIGNED(immediate, 4);
 
           // Remove rd and rn from instruction by orring it with immed and clearing bits.
           rn = R0;
@@ -1437,7 +1437,7 @@
           thumb_opcode = 5U /* 0b101 */;
           opcode_shift = 11;
           CHECK_LT(immediate, (1u << 10));
-          CHECK_EQ((immediate & 3u /* 0b11 */), 0u);
+          CHECK_ALIGNED(immediate, 4);
 
           // Remove rn from instruction.
           rn = R0;
@@ -1474,7 +1474,7 @@
            thumb_opcode = 0x61 /* 0b1100001 */;
            opcode_shift = 7;
            CHECK_LT(immediate, (1u << 9));
-           CHECK_EQ((immediate & 3u /* 0b11 */), 0u);
+           CHECK_ALIGNED(immediate, 4);
 
            // Remove rd and rn from instruction by orring it with immed and clearing bits.
            rn = R0;
@@ -1652,7 +1652,7 @@
 
 inline size_t Thumb2Assembler::Fixup::LiteralPoolPaddingSize(uint32_t current_code_size) {
   // The code size must be a multiple of 2.
-  DCHECK_EQ(current_code_size & 1u, 0u);
+  DCHECK_ALIGNED(current_code_size, 2);
   // If it isn't a multiple of 4, we need to add a 2-byte padding before the literal pool.
   return current_code_size & 2;
 }
@@ -1697,7 +1697,7 @@
       // Load literal instructions round down the PC+4 to a multiple of 4, so if the PC
       // isn't a multiple of 2, we need to adjust. Since we already adjusted for the target
       // being aligned, current PC alignment can be inferred from diff.
-      DCHECK_EQ(diff & 1, 0);
+      DCHECK_ALIGNED(diff, 2);
       diff = diff + (diff & 2);
       DCHECK_GE(diff, 0);
       break;
@@ -2045,7 +2045,7 @@
       if (sp_relative) {
         // SP relative, 10 bit offset.
         CHECK_LT(offset, (1 << 10));
-        CHECK_EQ((offset & 3 /* 0b11 */), 0);
+        CHECK_ALIGNED(offset, 4);
         encoding |= rd << 8 | offset >> 2;
       } else {
         // No SP relative.  The offset is shifted right depending on
@@ -2058,12 +2058,12 @@
         } else if (half) {
           // 6 bit offset, shifted by 1.
           CHECK_LT(offset, (1 << 6));
-          CHECK_EQ((offset & 1 /* 0b1 */), 0);
+          CHECK_ALIGNED(offset, 2);
           offset >>= 1;
         } else {
           // 7 bit offset, shifted by 2.
           CHECK_LT(offset, (1 << 7));
-          CHECK_EQ((offset & 3 /* 0b11 */), 0);
+          CHECK_ALIGNED(offset, 4);
           offset >>= 2;
         }
         encoding |= rn << 3 | offset  << 6;