Refactor and improve mod-union tables.

Adds support for registering additional mod-union tables, reducing the
amount of baked-in logic. Also adds support for updating mod-union
table references during compaction (not yet for the ReferenceCache
table).
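
In sketch form, the generalized update path looks like this (condensed
from the mark_sweep.cc hunk below; timing-logger plumbing omitted):

  void MarkSweep::UpdateAndMarkModUnion() {
    for (const auto& space : heap_->GetContinuousSpaces()) {
      if (IsImmuneSpace(space)) {
        // Any table registered against this space works here, so adding
        // a new table kind no longer requires collector changes.
        accounting::ModUnionTable* table =
            heap_->FindModUnionTableFromSpace(space);
        CHECK(table != nullptr);
        table->UpdateAndMarkReferences(MarkRootCallback, this);
      }
    }
  }

Sticky collections override UpdateAndMarkModUnion() with a no-op, since
they already scan all dirty cards that may reference newly allocated
objects (see sticky_mark_sweep.h below).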

Change-Id: I1beeda00839ed86ef0e853beff5ce10d0ab2b9d1
diff --git a/runtime/gc/collector/mark_sweep-inl.h b/runtime/gc/collector/mark_sweep-inl.h
index d0b0b5c..270c9ef 100644
--- a/runtime/gc/collector/mark_sweep-inl.h
+++ b/runtime/gc/collector/mark_sweep-inl.h
@@ -29,7 +29,7 @@
 namespace collector {
 
 template <typename MarkVisitor>
-inline void MarkSweep::ScanObjectVisit(const mirror::Object* obj, const MarkVisitor& visitor) {
+inline void MarkSweep::ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor) {
   DCHECK(obj != NULL);
   if (kIsDebugBuild && !IsMarked(obj)) {
     heap_->DumpSpaces();
@@ -62,7 +62,8 @@
 }
 
 template <typename Visitor>
-inline void MarkSweep::VisitObjectReferences(const mirror::Object* obj, const Visitor& visitor)
+inline void MarkSweep::VisitObjectReferences(mirror::Object* obj, const Visitor& visitor,
+                                             bool visit_class)
     SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
                           Locks::mutator_lock_) {
   DCHECK(obj != NULL);
@@ -70,6 +71,9 @@
 
   mirror::Class* klass = obj->GetClass();
   DCHECK(klass != NULL);
+  if (visit_class) {
+    visitor(obj, klass, MemberOffset(0), false);
+  }
   if (klass == mirror::Class::GetJavaLangClass()) {
     DCHECK_EQ(klass->GetClass(), mirror::Class::GetJavaLangClass());
     VisitClassReferences(klass, obj, visitor);
@@ -86,8 +90,8 @@
 }
 
 template <typename Visitor>
-inline void MarkSweep::VisitInstanceFieldsReferences(const mirror::Class* klass,
-                                                     const mirror::Object* obj,
+inline void MarkSweep::VisitInstanceFieldsReferences(mirror::Class* klass,
+                                                     mirror::Object* obj,
                                                      const Visitor& visitor)
     SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
   DCHECK(obj != NULL);
@@ -96,7 +100,7 @@
 }
 
 template <typename Visitor>
-inline void MarkSweep::VisitClassReferences(const mirror::Class* klass, const mirror::Object* obj,
+inline void MarkSweep::VisitClassReferences(mirror::Class* klass, mirror::Object* obj,
                                             const Visitor& visitor)
     SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
   VisitInstanceFieldsReferences(klass, obj, visitor);
@@ -104,15 +108,14 @@
 }
 
 template <typename Visitor>
-inline void MarkSweep::VisitStaticFieldsReferences(const mirror::Class* klass,
-                                                   const Visitor& visitor)
+inline void MarkSweep::VisitStaticFieldsReferences(mirror::Class* klass, const Visitor& visitor)
     SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
   DCHECK(klass != NULL);
   VisitFieldsReferences(klass, klass->GetReferenceStaticOffsets(), true, visitor);
 }
 
 template <typename Visitor>
-inline void MarkSweep::VisitFieldsReferences(const mirror::Object* obj, uint32_t ref_offsets,
+inline void MarkSweep::VisitFieldsReferences(mirror::Object* obj, uint32_t ref_offsets,
                                              bool is_static, const Visitor& visitor) {
   if (LIKELY(ref_offsets != CLASS_WALK_SUPER)) {
     // Found a reference offset bitmap.  Mark the specified offsets.
@@ -124,7 +127,7 @@
     while (ref_offsets != 0) {
       size_t right_shift = CLZ(ref_offsets);
       MemberOffset field_offset = CLASS_OFFSET_FROM_CLZ(right_shift);
-      const mirror::Object* ref = obj->GetFieldObject<const mirror::Object*>(field_offset, false);
+      mirror::Object* ref = obj->GetFieldObject<mirror::Object*>(field_offset, false);
       visitor(obj, ref, field_offset, is_static);
       ref_offsets &= ~(CLASS_HIGH_BIT >> right_shift);
     }
@@ -143,7 +146,7 @@
         mirror::ArtField* field = (is_static ? klass->GetStaticField(i)
                                    : klass->GetInstanceField(i));
         MemberOffset field_offset = field->GetOffset();
-        const mirror::Object* ref = obj->GetFieldObject<const mirror::Object*>(field_offset, false);
+        mirror::Object* ref = obj->GetFieldObject<mirror::Object*>(field_offset, false);
         visitor(obj, ref, field_offset, is_static);
       }
     }
@@ -151,11 +154,11 @@
 }
 
 template <typename Visitor>
-inline void MarkSweep::VisitObjectArrayReferences(const mirror::ObjectArray<mirror::Object>* array,
+inline void MarkSweep::VisitObjectArrayReferences(mirror::ObjectArray<mirror::Object>* array,
                                                   const Visitor& visitor) {
   const size_t length = static_cast<size_t>(array->GetLength());
   for (size_t i = 0; i < length; ++i) {
-    const mirror::Object* element = array->GetWithoutChecks(static_cast<int32_t>(i));
+    mirror::Object* element = array->GetWithoutChecks(static_cast<int32_t>(i));
     const size_t width = sizeof(mirror::Object*);
     MemberOffset offset(i * width + mirror::Array::DataOffset(width).Int32Value());
     visitor(array, element, offset, false);
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 1625ba6..a5e66d2 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -28,6 +28,7 @@
 #include "base/timing_logger.h"
 #include "gc/accounting/card_table-inl.h"
 #include "gc/accounting/heap_bitmap.h"
+#include "gc/accounting/mod_union_table.h"
 #include "gc/accounting/space_bitmap-inl.h"
 #include "gc/heap.h"
 #include "gc/space/image_space.h"
@@ -99,7 +100,7 @@
   } else {
     const space::ContinuousSpace* prev_space = nullptr;
     // Find out if the previous space is immune.
-    for (space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
+    for (const space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
       if (cur_space == space) {
         break;
       }
@@ -107,15 +108,19 @@
     }
     // If previous space was immune, then extend the immune region. Relies on continuous spaces
     // being sorted by Heap::AddContinuousSpace.
-    if (prev_space != NULL &&
-        immune_begin_ <= reinterpret_cast<Object*>(prev_space->Begin()) &&
-        immune_end_ >= reinterpret_cast<Object*>(prev_space->End())) {
+    if (prev_space != NULL && IsImmuneSpace(prev_space)) {
       immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
       immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
     }
   }
 }
 
+bool MarkSweep::IsImmuneSpace(const space::ContinuousSpace* space) {
+  return
+      immune_begin_ <= reinterpret_cast<Object*>(space->Begin()) &&
+      immune_end_ >= reinterpret_cast<Object*>(space->End());
+}
+
 void MarkSweep::BindBitmaps() {
   timings_.StartSplit("BindBitmaps");
   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
@@ -263,11 +268,23 @@
   }
   live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
   MarkConcurrentRoots();
-
-  heap_->UpdateAndMarkModUnion(this, timings_, GetGcType());
+  UpdateAndMarkModUnion();
   MarkReachableObjects();
 }
 
+void MarkSweep::UpdateAndMarkModUnion() {
+  for (const auto& space : heap_->GetContinuousSpaces()) {
+    if (IsImmuneSpace(space)) {
+      const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
+          "UpdateAndMarkImageModUnionTable";
+      base::TimingLogger::ScopedSplit split(name, &timings_);
+      accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
+      CHECK(mod_union_table != nullptr);
+      mod_union_table->UpdateAndMarkReferences(MarkRootCallback, this);
+    }
+  }
+}
+
 void MarkSweep::MarkThreadRoots(Thread* self) {
   MarkRootsCheckpoint(self);
 }
@@ -577,11 +594,11 @@
 
 void MarkSweep::CheckObject(const Object* obj) {
   DCHECK(obj != NULL);
-  VisitObjectReferences(obj, [this](const Object* obj, const Object* ref, MemberOffset offset,
-      bool is_static) NO_THREAD_SAFETY_ANALYSIS {
+  VisitObjectReferences(const_cast<Object*>(obj), [this](const Object* obj, const Object* ref,
+      MemberOffset offset, bool is_static) NO_THREAD_SAFETY_ANALYSIS {
     Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
     CheckReference(obj, ref, offset, is_static);
-  });
+  }, true);
 }
 
 void MarkSweep::VerifyImageRootVisitor(Object* root, void* arg) {
@@ -647,11 +664,11 @@
     explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
         : chunk_task_(chunk_task) {}
 
-    void operator()(const Object* obj) const {
+    void operator()(Object* obj) const {
       MarkSweep* mark_sweep = chunk_task_->mark_sweep_;
       mark_sweep->ScanObjectVisit(obj,
-          [mark_sweep, this](const Object* /* obj */, const Object* ref,
-              const MemberOffset& /* offset */, bool /* is_static */) ALWAYS_INLINE {
+          [mark_sweep, this](Object* /* obj */, Object* ref, const MemberOffset& /* offset */,
+              bool /* is_static */) ALWAYS_INLINE {
         if (ref != nullptr && mark_sweep->MarkObjectParallel(ref)) {
           if (kUseFinger) {
             android_memory_barrier();
@@ -708,11 +725,11 @@
     static const size_t kFifoSize = 4;
     BoundedFifoPowerOfTwo<const Object*, kFifoSize> prefetch_fifo;
     for (;;) {
-      const Object* obj = NULL;
+      const Object* obj = nullptr;
       if (kUseMarkStackPrefetch) {
         while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
           const Object* obj = mark_stack_[--mark_stack_pos_];
-          DCHECK(obj != NULL);
+          DCHECK(obj != nullptr);
           __builtin_prefetch(obj);
           prefetch_fifo.push_back(obj);
         }
@@ -727,8 +744,8 @@
         }
         obj = mark_stack_[--mark_stack_pos_];
       }
-      DCHECK(obj != NULL);
-      visitor(obj);
+      DCHECK(obj != nullptr);
+      visitor(const_cast<mirror::Object*>(obj));
     }
   }
 };
@@ -1366,7 +1383,7 @@
 // and dispatches to a specialized scanning routine.
 void MarkSweep::ScanObject(const Object* obj) {
   MarkObjectVisitor visitor(this);
-  ScanObjectVisit(obj, visitor);
+  ScanObjectVisit(const_cast<Object*>(obj), visitor);
 }
 
 void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 76e71fd..19df2da 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -114,6 +114,9 @@
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  bool IsImmuneSpace(const space::ContinuousSpace* space)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, ie
   // the image. Mark that portion of the heap as immune.
   virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -137,6 +140,9 @@
   void ProcessReferences(Thread* self)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  virtual void UpdateAndMarkModUnion()
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   // Sweeps unmarked objects to complete the garbage collection.
   virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
@@ -163,7 +169,7 @@
 
   // TODO: enable thread safety analysis when in use by multiple worker threads.
   template <typename MarkVisitor>
-  void ScanObjectVisit(const mirror::Object* obj, const MarkVisitor& visitor)
+  void ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor)
       NO_THREAD_SAFETY_ANALYSIS;
 
   size_t GetFreedBytes() const {
@@ -215,7 +221,8 @@
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   template <typename Visitor>
-  static void VisitObjectReferences(const mirror::Object* obj, const Visitor& visitor)
+  static void VisitObjectReferences(mirror::Object* obj, const Visitor& visitor,
+                                    bool visit_class = false)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
                             Locks::mutator_lock_);
 
@@ -306,7 +313,7 @@
   size_t GetThreadCount(bool paused) const;
 
   // Returns true if an object is inside of the immune region (assumed to be marked).
-  bool IsImmune(const mirror::Object* obj) const {
+  bool IsImmune(const mirror::Object* obj) const ALWAYS_INLINE {
     return obj >= immune_begin_ && obj < immune_end_;
   }
 
@@ -317,34 +324,34 @@
       NO_THREAD_SAFETY_ANALYSIS;
 
   template <typename Visitor>
-  static void VisitInstanceFieldsReferences(const mirror::Class* klass, const mirror::Object* obj,
+  static void VisitInstanceFieldsReferences(mirror::Class* klass, mirror::Object* obj,
                                             const Visitor& visitor)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   // Visit the header, static field references, and interface pointers of a class object.
   template <typename Visitor>
-  static void VisitClassReferences(const mirror::Class* klass, const mirror::Object* obj,
+  static void VisitClassReferences(mirror::Class* klass, mirror::Object* obj,
                                    const Visitor& visitor)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   template <typename Visitor>
-  static void VisitStaticFieldsReferences(const mirror::Class* klass, const Visitor& visitor)
+  static void VisitStaticFieldsReferences(mirror::Class* klass, const Visitor& visitor)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   template <typename Visitor>
-  static void VisitFieldsReferences(const mirror::Object* obj, uint32_t ref_offsets, bool is_static,
+  static void VisitFieldsReferences(mirror::Object* obj, uint32_t ref_offsets, bool is_static,
                                     const Visitor& visitor)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   // Visit all of the references in an object array.
   template <typename Visitor>
-  static void VisitObjectArrayReferences(const mirror::ObjectArray<mirror::Object>* array,
+  static void VisitObjectArrayReferences(mirror::ObjectArray<mirror::Object>* array,
                                          const Visitor& visitor)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   // Visits the header and field references of a data object.
   template <typename Visitor>
-  static void VisitOtherReferences(const mirror::Class* klass, const mirror::Object* obj,
+  static void VisitOtherReferences(mirror::Class* klass, mirror::Object* obj,
                                    const Visitor& visitor)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
     return VisitInstanceFieldsReferences(klass, obj, visitor);
diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h
index 79c4359..8bee00f 100644
--- a/runtime/gc/collector/sticky_mark_sweep.h
+++ b/runtime/gc/collector/sticky_mark_sweep.h
@@ -31,6 +31,10 @@
     return kGcTypeSticky;
   }
 
+  // Nothing special needs to happen here, since sticky collections already scan all the cards
+  // which may contain references to the newly allocated objects.
+  virtual void UpdateAndMarkModUnion() { }
+
   explicit StickyMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = "");
   ~StickyMarkSweep() {}