Use (D)CHECK_ALIGNED more.
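
Replace DCHECK(IsAligned<k>(x)) and CHECK(IsAligned<k>(x)) in the GC
space code with the equivalent DCHECK_ALIGNED(x, k) and
CHECK_ALIGNED(x, k). The macro form runs the same IsAligned<> test but
also streams the offending value into the failure message, so a
tripped check reports the misaligned pointer or size instead of only
the stringified condition. The alignment argument still ends up as the
template parameter of IsAligned<>, so it must remain a compile-time
constant (e.g. the static const kGcCardSize in malloc_space.cc).

A minimal standalone sketch of the idea (not ART's actual macros;
SKETCH_CHECK_ALIGNED and its message format are illustrative only):

    #include <cstdint>
    #include <cstdlib>
    #include <iostream>

    template <int n, typename T>
    static inline bool IsAligned(T x) {
      static_assert((n & (n - 1)) == 0, "n is not a power of two");
      return (x & (n - 1)) == 0;
    }

    // DCHECK(IsAligned<k>(v)) can only stringify the condition when it
    // fails; a (D)CHECK_ALIGNED-style macro also reports the value.
    #define SKETCH_CHECK_ALIGNED(value, alignment)                   \
      do {                                                           \
        if (!IsAligned<alignment>(value)) {                          \
          std::cerr << "Check failed: " #value " aligned to "        \
                    << alignment << ": "                             \
                    << reinterpret_cast<const void*>(value) << "\n"; \
          std::abort();                                              \
        }                                                            \
      } while (false)

    int main() {
      uintptr_t ok = 4096;
      SKETCH_CHECK_ALIGNED(ok, 4096);   // passes silently
      uintptr_t bad = ok + 1;
      SKETCH_CHECK_ALIGNED(bad, 4096);  // aborts, printing 0x1001
    }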

Change-Id: I9d740f6a88d01e028d4ddc3e4e62b0a73ea050af
diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h
index d9ad9a3..338a41e 100644
--- a/runtime/gc/space/bump_pointer_space-inl.h
+++ b/runtime/gc/space/bump_pointer_space-inl.h
@@ -63,7 +63,7 @@
 }
 
 inline mirror::Object* BumpPointerSpace::AllocNonvirtualWithoutAccounting(size_t num_bytes) {
-  DCHECK(IsAligned<kAlignment>(num_bytes));
+  DCHECK_ALIGNED(num_bytes, kAlignment);
   uint8_t* old_end;
   uint8_t* new_end;
   do {
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index a913e59..2798b21 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -440,7 +440,7 @@
       AllocationInfo* next_next_info = next_info->GetNextInfo();
       // Next next info can't be free since we always coalesce.
       DCHECK(!next_next_info->IsFree());
-      DCHECK(IsAligned<kAlignment>(next_next_info->ByteSize()));
+      DCHECK_ALIGNED(next_next_info->ByteSize(), kAlignment);
       new_free_info = next_next_info;
       new_free_size += next_next_info->GetPrevFreeBytes();
       RemoveFreePrev(next_next_info);
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index b014217..3a0d814 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -46,8 +46,8 @@
   if (create_bitmaps) {
     size_t bitmap_index = bitmap_index_++;
     static const uintptr_t kGcCardSize = static_cast<uintptr_t>(accounting::CardTable::kCardSize);
-    CHECK(IsAligned<kGcCardSize>(reinterpret_cast<uintptr_t>(mem_map->Begin())));
-    CHECK(IsAligned<kGcCardSize>(reinterpret_cast<uintptr_t>(mem_map->End())));
+    CHECK_ALIGNED(reinterpret_cast<uintptr_t>(mem_map->Begin()), kGcCardSize);
+    CHECK_ALIGNED(reinterpret_cast<uintptr_t>(mem_map->End()), kGcCardSize);
     live_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
         StringPrintf("allocspace %s live-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
         Begin(), NonGrowthLimitCapacity()));
@@ -164,10 +164,10 @@
   // alloc spaces.
   RevokeAllThreadLocalBuffers();
   SetEnd(reinterpret_cast<uint8_t*>(RoundUp(reinterpret_cast<uintptr_t>(End()), kPageSize)));
-  DCHECK(IsAligned<accounting::CardTable::kCardSize>(begin_));
-  DCHECK(IsAligned<accounting::CardTable::kCardSize>(End()));
-  DCHECK(IsAligned<kPageSize>(begin_));
-  DCHECK(IsAligned<kPageSize>(End()));
+  DCHECK_ALIGNED(begin_, accounting::CardTable::kCardSize);
+  DCHECK_ALIGNED(End(), accounting::CardTable::kCardSize);
+  DCHECK_ALIGNED(begin_, kPageSize);
+  DCHECK_ALIGNED(End(), kPageSize);
   size_t size = RoundUp(Size(), kPageSize);
   // Trimming the heap should be done by the caller since we may have invalidated the accounting
   // stored in between objects.
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index 1cdf69d..db005f7 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -43,7 +43,7 @@
 inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
                                                     size_t* usable_size,
                                                     size_t* bytes_tl_bulk_allocated) {
-  DCHECK(IsAligned<kAlignment>(num_bytes));
+  DCHECK_ALIGNED(num_bytes, kAlignment);
   mirror::Object* obj;
   if (LIKELY(num_bytes <= kRegionSize)) {
     // Non-large object.
@@ -115,7 +115,7 @@
                                                   size_t* usable_size,
                                                   size_t* bytes_tl_bulk_allocated) {
   DCHECK(IsAllocated() && IsInToSpace());
-  DCHECK(IsAligned<kAlignment>(num_bytes));
+  DCHECK_ALIGNED(num_bytes, kAlignment);
   Atomic<uint8_t*>* atomic_top = reinterpret_cast<Atomic<uint8_t*>*>(&top_);
   uint8_t* old_top;
   uint8_t* new_top;
@@ -266,7 +266,7 @@
 mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocated,
                                         size_t* usable_size,
                                         size_t* bytes_tl_bulk_allocated) {
-  DCHECK(IsAligned<kAlignment>(num_bytes));
+  DCHECK_ALIGNED(num_bytes, kAlignment);
   DCHECK_GT(num_bytes, kRegionSize);
   size_t num_regs = RoundUp(num_bytes, kRegionSize) / kRegionSize;
   DCHECK_GT(num_regs, 0U);
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 814ab6c..9a2d0c6 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -287,7 +287,7 @@
 
 void RegionSpace::FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) {
   DCHECK(Contains(large_obj));
-  DCHECK(IsAligned<kRegionSize>(large_obj));
+  DCHECK_ALIGNED(large_obj, kRegionSize);
   MutexLock mu(Thread::Current(), region_lock_);
   uint8_t* begin_addr = reinterpret_cast<uint8_t*>(large_obj);
   uint8_t* end_addr = AlignUp(reinterpret_cast<uint8_t*>(large_obj) + bytes_allocated, kRegionSize);
@@ -366,7 +366,7 @@
   uint8_t* tlab_start = thread->GetTlabStart();
   DCHECK_EQ(thread->HasTlab(), tlab_start != nullptr);
   if (tlab_start != nullptr) {
-    DCHECK(IsAligned<kRegionSize>(tlab_start));
+    DCHECK_ALIGNED(tlab_start, kRegionSize);
     Region* r = RefToRegionLocked(reinterpret_cast<mirror::Object*>(tlab_start));
     DCHECK(r->IsAllocated());
     DCHECK_EQ(thread->GetThreadLocalBytesAllocated(), kRegionSize);