Refactor GC to use a class for each type of garbage collection.

Added separate files for mark sweep, partial mark sweep, and
sticky mark sweep.

Added a common superclass for all of the garbage collectors.

Added additional statistics for each GC.

Moved the main garbage collection code out of heap.cc.
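
For orientation, the resulting hierarchy and its phase driver look roughly
like this (a condensed sketch of the new files, not the full interface):

  class GarbageCollector {
   public:
    // Runs the phases: Initialize -> Marking -> [DirtyObjects]* -> Reclaim
    // -> Finish, suspending the mutators as required.
    void Run();
   protected:
    virtual void InitializePhase() = 0;
    virtual void MarkingPhase() = 0;
    virtual bool HandleDirtyObjectsPhase();  // concurrent collectors only
    virtual void ReclaimPhase() = 0;
    virtual void FinishPhase() = 0;
  };
  class MarkSweep : public GarbageCollector {};        // kGcTypeFull
  class PartialMarkSweep : public MarkSweep {};        // kGcTypePartial (zygote immune)
  class StickyMarkSweep : public PartialMarkSweep {};  // kGcTypeSticky (dirty cards only)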

Change-Id: Ida0021ab2f740fc8228bbbf4d43cd9bc56b4ba46
diff --git a/src/gc/garbage_collector.cc b/src/gc/garbage_collector.cc
new file mode 100644
index 0000000..bcc7b63
--- /dev/null
+++ b/src/gc/garbage_collector.cc
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "garbage_collector.h"
+#include "thread_list.h"
+
+namespace art {
+  GarbageCollector::GarbageCollector(Heap* heap)
+      : heap_(heap),
+        duration_(0) {
+  }
+
+  bool GarbageCollector::HandleDirtyObjectsPhase() {
+    DCHECK(IsConcurrent());
+    return true;
+  }
+
+  void GarbageCollector::RegisterPause(uint64_t nano_length) {
+    pause_times_.push_back(nano_length);
+  }
+
+  void GarbageCollector::Run() {
+    Thread* self = Thread::Current();
+    ThreadList* thread_list = Runtime::Current()->GetThreadList();
+
+    uint64_t start_time = NanoTime();
+    pause_times_.clear();
+    duration_ = 0;
+
+    InitializePhase();
+
+    if (!IsConcurrent()) {
+      // Pause is the entire length of the GC.
+      uint64_t pause_start = NanoTime();
+      thread_list->SuspendAll();
+      MarkingPhase();
+      ReclaimPhase();
+      thread_list->ResumeAll();
+      uint64_t pause_end = NanoTime();
+      pause_times_.push_back(pause_end - pause_start);
+    } else {
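+      // Concurrent collection: marking runs with the mutators active (holding
+      // the mutator lock for reading), punctuated by short stop-the-world
+      // passes to handle objects dirtied during the concurrent mark.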
+      {
+        ReaderMutexLock mu(self, *Locks::mutator_lock_);
+        MarkingPhase();
+      }
+      bool done = false;
+      while (!done) {
+        uint64_t pause_start = NanoTime();
+        thread_list->SuspendAll();
+        done = HandleDirtyObjectsPhase();
+        thread_list->ResumeAll();
+        uint64_t pause_end = NanoTime();
+        pause_times_.push_back(pause_end - pause_start);
+      }
+      {
+        ReaderMutexLock mu(self, *Locks::mutator_lock_);
+        ReclaimPhase();
+      }
+    }
+
+    uint64_t end_time = NanoTime();
+    duration_ = end_time - start_time;
+
+    FinishPhase();
+  }
+
+  GarbageCollector::~GarbageCollector() {
+  }
+}  // namespace art
diff --git a/src/gc/garbage_collector.h b/src/gc/garbage_collector.h
new file mode 100644
index 0000000..9ddf45f
--- /dev/null
+++ b/src/gc/garbage_collector.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_GARBAGE_COLLECTOR_H_
+#define ART_SRC_GC_GARBAGE_COLLECTOR_H_
+
+#include "locks.h"
+#include "utils.h"
+
+namespace art {
+
+class Heap;
+
+class GarbageCollector {
+ public:
+  // Returns true iff the garbage collector is concurrent.
+  virtual bool IsConcurrent() const = 0;
+
+  GarbageCollector(Heap* heap);
+
+  virtual ~GarbageCollector();
+
+  // Run the garbage collector.
+  void Run();
+
+  Heap* GetHeap() {
+    return heap_;
+  }
+
+  // Returns the durations of the mutator pauses in nanoseconds.
+  const std::vector<uint64_t>& GetPauseTimes() const {
+    return pause_times_;
+  }
+
+  // Returns how long the GC took to complete in nanoseconds.
+  uint64_t GetDuration() const {
+    return duration_;
+  }
+
+  virtual std::string GetName() const = 0;
+
+  void RegisterPause(uint64_t nano_length);
+
+ protected:
+  // The initial phase. Done with mutators unpaused.
+  virtual void InitializePhase() = 0;
+
+  // Mark all reachable objects; done concurrently for concurrent GCs.
+  virtual void MarkingPhase() = 0;
+
+  // Only called for concurrent GCs. Called repeatedly until it returns true.
+  virtual bool HandleDirtyObjectsPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Called with mutators running for concurrent GCs, and suspended otherwise.
+  virtual void ReclaimPhase() = 0;
+
+  // Called after the GC is finished. Done with mutators unpaused.
+  virtual void FinishPhase() = 0;
+
+  Heap* heap_;
+  std::vector<uint64_t> pause_times_;
+  uint64_t duration_;
+};
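+
+// Example driver (hypothetical; the real call sites live in Heap):
+//   MarkSweep collector(heap, /* is_concurrent */ true);
+//   collector.Run();  // runs all phases, suspending mutators as needed
+//   uint64_t gc_ns = collector.GetDuration();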
+
+}  // namespace art
+
+#endif  // ART_SRC_GC_GARBAGE_COLLECTOR_H_
diff --git a/src/gc/large_object_space.cc b/src/gc/large_object_space.cc
index b066dd5..b2e0d2f 100644
--- a/src/gc/large_object_space.cc
+++ b/src/gc/large_object_space.cc
@@ -28,9 +28,7 @@
 namespace art {
 
 void LargeObjectSpace::SwapBitmaps() {
-  SpaceSetMap* temp_live_objects = live_objects_.release();
-  live_objects_.reset(mark_objects_.release());
-  mark_objects_.reset(temp_live_objects);
+  live_objects_.swap(mark_objects_);
   // Swap names to get more descriptive diagnostics.
   std::string temp_name = live_objects_->GetName();
   live_objects_->SetName(mark_objects_->GetName());
diff --git a/src/gc/mark_sweep.cc b/src/gc/mark_sweep.cc
index 1ccceaa..818eb81 100644
--- a/src/gc/mark_sweep.cc
+++ b/src/gc/mark_sweep.cc
@@ -16,6 +16,8 @@
 
 #include "mark_sweep.h"
 
+#include <functional>
+#include <numeric>
 #include <climits>
 #include <vector>
 
@@ -43,7 +45,7 @@
 
 // Performance options.
 static const bool kParallelMarkStack = true;
-static const bool kDisableFinger = true;
+static const bool kDisableFinger = kParallelMarkStack;
 static const bool kUseMarkStackPrefetch = true;
 
 // Profiling and information flags.
@@ -67,38 +69,267 @@
   MarkSweep* const mark_sweep_;
 };
 
-MarkSweep::MarkSweep(ObjectStack* mark_stack)
-    : current_mark_bitmap_(NULL),
-      mark_stack_(mark_stack),
-      heap_(NULL),
-      finger_(NULL),
-      immune_begin_(NULL),
-      immune_end_(NULL),
-      soft_reference_list_(NULL),
-      weak_reference_list_(NULL),
-      finalizer_reference_list_(NULL),
-      phantom_reference_list_(NULL),
-      cleared_reference_list_(NULL),
-      freed_bytes_(0), freed_objects_(0),
-      class_count_(0), array_count_(0), other_count_(0),
-      large_object_test_(0), large_object_mark_(0),
-      classes_marked_(0), overhead_time_(0),
-      work_chunks_created_(0), work_chunks_deleted_(0),
-      reference_count_(0),
-      gc_barrier_(new Barrier(0)),
-      large_object_lock_("large object lock"),
-      mark_stack_expand_lock_("mark stack expand lock") {
-  DCHECK(mark_stack_ != NULL);
+std::string MarkSweep::GetName() const {
+  std::ostringstream ss;
+  ss << (IsConcurrent() ? "Concurrent" : "") << GetGcType();
+  return ss.str();
 }
 
-void MarkSweep::Init() {
+void MarkSweep::ImmuneSpace(ContinuousSpace* space) {
+  // Bind live to mark bitmap if necessary.
+  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
+    BindLiveToMarkBitmap(space);
+  }
+
+  // Add the space to the immune region.
+  if (immune_begin_ == NULL) {
+    DCHECK(immune_end_ == NULL);
+    SetImmuneRange(reinterpret_cast<Object*>(space->Begin()),
+                   reinterpret_cast<Object*>(space->End()));
+  } else {
+    const Spaces& spaces = GetHeap()->GetSpaces();
+    const ContinuousSpace* prev_space = NULL;
+    // Find out if the previous space is immune.
+    // TODO: C++0x
+    for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
+      if (*it == space) {
+        break;
+      }
+      prev_space = *it;
+    }
+
+    // If the previous space was immune, then extend the immune region.
+    if (prev_space != NULL &&
+        immune_begin_ <= reinterpret_cast<Object*>(prev_space->Begin()) &&
+        immune_end_ >= reinterpret_cast<Object*>(prev_space->End())) {
+      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
+      immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
+    }
+  }
+}
+
+// Bind the live bits to the mark bits of bitmaps based on the gc type.
+void MarkSweep::BindBitmaps() {
+  Spaces& spaces = GetHeap()->GetSpaces();
+  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+
+  // Mark all of the spaces we never collect as immune.
+  for (Spaces::iterator it = spaces.begin(); it != spaces.end(); ++it) {
+    ContinuousSpace* space = *it;
+    if (space->GetGcRetentionPolicy() == kGcRetentionPolicyNeverCollect) {
+      ImmuneSpace(space);
+    }
+  }
+}
+
+MarkSweep::MarkSweep(Heap* heap, bool is_concurrent)
+    : GarbageCollector(heap),
+      gc_barrier_(new Barrier(0)),
+      large_object_lock_("large object lock"),
+      mark_stack_expand_lock_("mark stack expand lock"),
+      timings_(GetName(), true),
+      cumulative_timings_(GetName(), true),
+      is_concurrent_(is_concurrent) {
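+  // Note: GetName() called during construction resolves to MarkSweep's name,
+  // even for subclasses; PartialMarkSweep and StickyMarkSweep therefore
+  // re-set the cumulative timing name in their own constructors.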
+  cumulative_timings_.SetName(GetName());
+  ResetCumulativeStatistics();
+}
+
+void MarkSweep::InitializePhase() {
+  mark_stack_ = GetHeap()->mark_stack_.get();
+  DCHECK(mark_stack_ != NULL);
+  finger_ = NULL;
+  SetImmuneRange(NULL, NULL);
+  soft_reference_list_ = NULL;
+  weak_reference_list_ = NULL;
+  finalizer_reference_list_ = NULL;
+  phantom_reference_list_ = NULL;
+  cleared_reference_list_ = NULL;
+  freed_bytes_ = 0;
+  freed_objects_ = 0;
+  class_count_ = 0;
+  array_count_ = 0;
+  other_count_ = 0;
+  large_object_test_ = 0;
+  large_object_mark_ = 0;
+  classes_marked_ = 0;
+  overhead_time_ = 0;
+  work_chunks_created_ = 0;
+  work_chunks_deleted_ = 0;
+  reference_count_ = 0;
   java_lang_Class_ = Class::GetJavaLangClass();
   CHECK(java_lang_Class_ != NULL);
-  heap_ = Runtime::Current()->GetHeap();
-  mark_stack_->Reset();
   FindDefaultMarkBitmap();
   // Mark any concurrent roots as dirty since we need to scan them at least once during this GC.
   Runtime::Current()->DirtyRoots();
+  timings_.Reset();
+  // Do any pre GC verification.
+  heap_->PreGcVerification(this);
+}
+
+void MarkSweep::ProcessReferences(Thread* self) {
+  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+  ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_,
+                    &finalizer_reference_list_, &phantom_reference_list_);
+  timings_.AddSplit("ProcessReferences");
+}
+
+bool MarkSweep::HandleDirtyObjectsPhase() {
+  Thread* self = Thread::Current();
+  ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
+  Locks::mutator_lock_->AssertExclusiveHeld(self);
+
+  {
+    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+
+    // Re-mark root set.
+    ReMarkRoots();
+    timings_.AddSplit("ReMarkRoots");
+
+    // Scan dirty objects; this is required for concurrent GCs, since mutators may have
+    // dirtied objects while the concurrent mark ran.
+    RecursiveMarkDirtyObjects();
+  }
+
+  ProcessReferences(self);
+
+  // Only need to do this if we have the card mark verification on, and only during concurrent GC.
+  if (GetHeap()->verify_missing_card_marks_) {
+    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+    // This second sweep makes sure that we don't have any objects in the live stack which point to
+    // freed objects. These cause problems since their references may be previously freed objects.
+    SweepArray(timings_, allocation_stack, false);
+  } else {
+    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+    // We only sweep over the live stack, and the live stack should not intersect with the
+    // allocation stack, so it should be safe to UnMark anything in the allocation stack as live.
+    heap_->UnMarkAllocStack(GetHeap()->alloc_space_->GetMarkBitmap(),
+                            GetHeap()->large_object_space_->GetMarkObjects(),
+                            allocation_stack);
+    timings_.AddSplit("UnMarkAllocStack");
+  }
+  return true;
+}
+
+bool MarkSweep::IsConcurrent() const {
+  return is_concurrent_;
+}
+
+void MarkSweep::MarkingPhase() {
+  Heap* heap = GetHeap();
+  Thread* self = Thread::Current();
+
+  BindBitmaps();
+  FindDefaultMarkBitmap();
+  timings_.AddSplit("BindBitmaps");
+
+  // Process dirty cards and add dirty cards to mod union tables.
+  heap->ProcessCards(timings_);
+
+  // Need to do this before the checkpoint since we don't want any threads to add references to
+  // the live stack during the recursive mark.
+  heap->SwapStacks();
+  timings_.AddSplit("SwapStacks");
+
+  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
+    // If we exclusively hold the mutator lock, all threads must be suspended.
+    MarkRoots();
+    timings_.AddSplit("MarkConcurrentRoots");
+  } else {
+    MarkRootsCheckpoint();
+    timings_.AddSplit("MarkRootsCheckpoint");
+    MarkNonThreadRoots();
+    timings_.AddSplit("MarkNonThreadRoots");
+  }
+  MarkConcurrentRoots();
+  timings_.AddSplit("MarkConcurrentRoots");
+
+  heap->UpdateAndMarkModUnion(this, timings_, GetGcType());
+  MarkReachableObjects();
+}
+
+void MarkSweep::MarkReachableObjects() {
+  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
+  // knowing that new allocations won't be marked as live.
+  ObjectStack* live_stack = heap_->GetLiveStack();
+  heap_->MarkAllocStack(heap_->alloc_space_->GetLiveBitmap(),
+                        heap_->large_object_space_->GetLiveObjects(),
+                        live_stack);
+  live_stack->Reset();
+  timings_.AddSplit("MarkStackAsLive");
+  // Recursively mark all the non-image bits set in the mark bitmap.
+  RecursiveMark();
+  DisableFinger();
+}
+
+void MarkSweep::ReclaimPhase() {
+  Thread* self = Thread::Current();
+
+  if (!IsConcurrent()) {
+    ProcessReferences(self);
+  }
+
+  // Before freeing anything, let's verify the heap.
+  if (kIsDebugBuild) {
+    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+    VerifyImageRoots();
+  }
+  heap_->PreSweepingGcVerification(this);
+
+  {
+    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+
+    // Reclaim unmarked objects.
+    Sweep(timings_, false);
+
+    // Swap the live and mark bitmaps for each space which we modified. This is an
+    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
+    // bitmaps.
+    SwapBitmaps();
+    timings_.AddSplit("SwapBitmaps");
+
+    // Unbind the live and mark bitmaps.
+    UnBindBitmaps();
+  }
+
+  heap_->GrowForUtilization();
+  timings_.AddSplit("GrowForUtilization");
+}
+
+void MarkSweep::SwapBitmaps() {
+  // Swap the live and mark bitmaps for each alloc space. This is needed since sweep re-swaps
+  // these bitmaps. The bitmap swapping is an optimization so that we do not need to clear the live
+  // bits of dead objects in the live bitmap.
+  const GcType gc_type = GetGcType();
+  // TODO: C++0x
+  Spaces& spaces = heap_->GetSpaces();
+  for (Spaces::iterator it = spaces.begin(); it != spaces.end(); ++it) {
+    ContinuousSpace* space = *it;
+    // We never allocate into zygote spaces.
+    if (space->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect ||
+        (gc_type == kGcTypeFull &&
+            space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect)) {
+      SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+      SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
+      if (live_bitmap != mark_bitmap) {
+        heap_->GetLiveBitmap()->ReplaceBitmap(live_bitmap, mark_bitmap);
+        heap_->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
+        space->AsAllocSpace()->SwapBitmaps();
+      }
+    }
+  }
+  SwapLargeObjects();
+}
+
+void MarkSweep::SwapLargeObjects() {
+  LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace();
+  large_object_space->SwapBitmaps();
+  heap_->GetLiveBitmap()->SetLargeObjects(large_object_space->GetLiveObjects());
+  heap_->GetMarkBitmap()->SetLargeObjects(large_object_space->GetMarkObjects());
+}
+
+void MarkSweep::SetImmuneRange(Object* begin, Object* end) {
+  immune_begin_ = begin;
+  immune_end_ = end;
 }
 
 void MarkSweep::FindDefaultMarkBitmap() {
@@ -238,6 +469,12 @@
   }
 }
 
+void MarkSweep::MarkRoot(const Object* obj) {
+  if (obj != NULL) {
+    MarkObjectNonNull(obj, false);
+  }
+}
+
 void MarkSweep::MarkRootParallelCallback(const Object* root, void* arg) {
   DCHECK(root != NULL);
   DCHECK(arg != NULL);
@@ -304,6 +541,9 @@
 
   void operator ()(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) const
       NO_THREAD_SAFETY_ANALYSIS {
+    if (kDebugLocking) {
+      Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
+    }
     mark_sweep_->CheckReference(obj, ref, offset, is_static);
   }
 
@@ -325,12 +565,6 @@
   mark_sweep->CheckObject(root);
 }
 
-void MarkSweep::CopyMarkBits(ContinuousSpace* space) {
-  SpaceBitmap* live_bitmap = space->GetLiveBitmap();
-  SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
-  mark_bitmap->CopyFrom(live_bitmap);
-}
-
 void MarkSweep::BindLiveToMarkBitmap(ContinuousSpace* space) {
   CHECK(space->IsAllocSpace());
   DlMallocSpace* alloc_space = space->AsAllocSpace();
@@ -347,9 +581,12 @@
 
   }
 
-  void operator ()(const Object* obj) const
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  // TODO: Fix this when annotalysis works with visitors.
+  void operator ()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
+    if (kDebugLocking) {
+      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
+    }
     mark_sweep_->ScanObject(obj);
   }
 
@@ -380,8 +617,10 @@
   }
 
   void operator ()(const Object* obj) const
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
-                            Locks::mutator_lock_) {
+      NO_THREAD_SAFETY_ANALYSIS {
+    if (kDebugLocking) {
+      Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
+    }
     DCHECK(obj != NULL);
     mark_sweep_->CheckObject(obj);
   }
@@ -410,7 +649,7 @@
 
 // Populates the mark stack based on the set of marked objects and
 // recursively marks until the mark stack is emptied.
-void MarkSweep::RecursiveMark(bool partial, TimingLogger& timings) {
+void MarkSweep::RecursiveMark() {
   // RecursiveMark will build the lists of known instances of the Reference classes.
   // See DelayReferenceReferent for details.
   CHECK(soft_reference_list_ == NULL);
@@ -419,61 +658,33 @@
   CHECK(phantom_reference_list_ == NULL);
   CHECK(cleared_reference_list_ == NULL);
 
+  const bool partial = GetGcType() == kGcTypePartial;
   const Spaces& spaces = heap_->GetSpaces();
   SetFingerVisitor set_finger_visitor(this);
   ScanObjectVisitor scan_visitor(this);
-  for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
-    ContinuousSpace* space = *it;
-    if ((!kDisableFinger && space->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect) ||
-        (!partial && space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect)
-        ) {
-      current_mark_bitmap_ = space->GetMarkBitmap();
-      if (current_mark_bitmap_ == NULL) {
-        GetHeap()->DumpSpaces();
-        LOG(FATAL) << "invalid bitmap";
-      }
-      // This function does not handle heap end increasing, so we must use the space end.
-      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
-      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
-      current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor, set_finger_visitor);
-    }
-  }
-  finger_ = reinterpret_cast<Object*>(~0);
-  timings.AddSplit("RecursiveMark");
-  // TODO: tune the frequency of emptying the mark stack
-  ProcessMarkStack();
-  timings.AddSplit("ProcessMarkStack");
-}
-
-void MarkSweep::RecursiveMarkCards(CardTable* card_table, const std::vector<byte*>& cards,
-                                   TimingLogger& timings) {
-  ScanObjectVisitor image_root_visitor(this);
-  SetFingerVisitor finger_visitor(this);
-  const size_t card_count = cards.size();
-  SpaceBitmap* active_bitmap = NULL;
-  for (size_t i = 0;i < card_count;) {
-    Object* start_obj = reinterpret_cast<Object*>(card_table->AddrFromCard(cards[i]));
-    uintptr_t begin = reinterpret_cast<uintptr_t>(start_obj);
-    uintptr_t end = begin + CardTable::kCardSize;
-    for (++i; reinterpret_cast<uintptr_t>(cards[i]) == end && i < card_count; ++i) {
-      end += CardTable::kCardSize;
-    }
-    if (active_bitmap == NULL || !active_bitmap->HasAddress(start_obj)) {
-      active_bitmap = heap_->GetMarkBitmap()->GetSpaceBitmap(start_obj);
-      if (kIsDebugBuild && active_bitmap == NULL) {
-        GetHeap()->DumpSpaces();
-        LOG(FATAL) << "Object " << reinterpret_cast<const void*>(start_obj);
+  if (!kDisableFinger) {
+    finger_ = NULL;
+    for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
+      ContinuousSpace* space = *it;
+      if ((space->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect) ||
+          (!partial && space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect)) {
+        current_mark_bitmap_ = space->GetMarkBitmap();
+        if (current_mark_bitmap_ == NULL) {
+          GetHeap()->DumpSpaces();
+          LOG(FATAL) << "invalid bitmap";
+        }
+        // This function does not handle heap end increasing, so we must use the space end.
+        uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
+        uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
+        current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor, set_finger_visitor);
       }
     }
-    if (kDisableFinger) {
-      active_bitmap->VisitMarkedRange(begin, end, image_root_visitor, VoidFunctor());
-    } else {
-      active_bitmap->VisitMarkedRange(begin, end, image_root_visitor, finger_visitor);
-    }
   }
-  timings.AddSplit("RecursiveMarkCards");
+  DisableFinger();
+  timings_.AddSplit("RecursiveMark");
   ProcessMarkStack();
-  timings.AddSplit("ProcessMarkStack");
+  timings_.AddSplit("ProcessMarkStack");
 }
 
 bool MarkSweep::IsMarkedCallback(const Object* object, void* arg) {
@@ -484,7 +695,9 @@
 
 void MarkSweep::RecursiveMarkDirtyObjects(byte minimum_age) {
   ScanGrayObjects(minimum_age);
+  timings_.AddSplit("ScanGrayObjects");
   ProcessMarkStack();
+  timings_.AddSplit("ProcessMarkStack");
 }
 
 void MarkSweep::ReMarkRoots() {
@@ -613,6 +826,22 @@
   return *gc_barrier_;
 }
 
+const TimingLogger& MarkSweep::GetTimings() const {
+  return timings_;
+}
+
+const CumulativeLogger& MarkSweep::GetCumulativeTimings() const {
+  return cumulative_timings_;
+}
+
+void MarkSweep::ResetCumulativeStatistics() {
+  cumulative_timings_.Reset();
+  total_time_ = 0;
+  total_paused_time_ = 0;
+  total_freed_objects_ = 0;
+  total_freed_bytes_ = 0;
+}
+
 void MarkSweep::MarkRootsCheckpoint() {
   CheckpointMarkThreadRoots check_point(this);
   ThreadList* thread_list = Runtime::Current()->GetThreadList();
@@ -707,7 +936,7 @@
   logger.AddSplit("ResetStack");
 }
 
-void MarkSweep::Sweep(TimingLogger& timings, bool partial, bool swap_bitmaps) {
+void MarkSweep::Sweep(TimingLogger& timings, bool swap_bitmaps) {
   DCHECK(mark_stack_->IsEmpty());
 
   // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark
@@ -715,6 +944,7 @@
   SweepSystemWeaks();
   timings.AddSplit("SweepSystemWeaks");
 
+  const bool partial = GetGcType() == kGcTypePartial;
   const Spaces& spaces = heap_->GetSpaces();
   SweepCallbackContext scc;
   scc.mark_sweep = this;
@@ -746,6 +976,9 @@
     }
   }
   timings.AddSplit("Sweep");
+
+  SweepLargeObjects(swap_bitmaps);
+  timings.AddSplit("SweepLargeObjects");
 }
 
 void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
@@ -857,8 +1090,14 @@
   MarkObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {
   }
 
+  // TODO: Fix this when annotalysis works with visitors.
   void operator ()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */,
-                   bool /* is_static */) const EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+                   bool /* is_static */) const
+      NO_THREAD_SAFETY_ANALYSIS {
+    if (kDebugLocking) {
+      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
+    }
     mark_sweep_->MarkObject(ref);
   }
 
@@ -1228,7 +1467,23 @@
   }
 }
 
-MarkSweep::~MarkSweep() {
+void MarkSweep::FinishPhase() {
+  // Can't enqueue references if we hold the mutator lock.
+  Object* cleared_references = GetClearedReferences();
+  heap_->EnqueueClearedReferences(&cleared_references);
+
+  heap_->PostGcVerification(this);
+
+  // Update the cumulative statistics. Note: the accumulator must start as a
+  // uint64_t, otherwise std::accumulate truncates the sum to int.
+  total_time_ += GetDuration();
+  total_paused_time_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(),
+                                        static_cast<uint64_t>(0), std::plus<uint64_t>());
+  total_freed_objects_ += GetFreedObjects();
+  total_freed_bytes_ += GetFreedBytes();
+
+  // Ensure that the mark stack is empty.
+  CHECK(mark_stack_->IsEmpty());
+
   if (kCountScannedTypes) {
     VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
              << " other=" << other_count_;
@@ -1254,8 +1509,10 @@
     VLOG(gc) << "References scanned " << reference_count_;
   }
 
-  // Ensure that the mark stack is empty.
-  CHECK(mark_stack_->IsEmpty());
+  // Update the cumulative loggers.
+  cumulative_timings_.Start();
+  cumulative_timings_.AddLogger(timings_);
+  cumulative_timings_.End();
 
   // Clear all of the spaces' mark bitmaps.
   const Spaces& spaces = heap_->GetSpaces();
@@ -1273,4 +1530,8 @@
   large_objects->GetMarkObjects()->Clear();
 }
 
+MarkSweep::~MarkSweep() {
+}
+
 }  // namespace art
diff --git a/src/gc/mark_sweep.h b/src/gc/mark_sweep.h
index 98445d4..554577b 100644
--- a/src/gc/mark_sweep.h
+++ b/src/gc/mark_sweep.h
@@ -18,6 +18,7 @@
 #define ART_SRC_MARK_SWEEP_H_
 
 #include "atomic_stack.h"
+#include "garbage_collector.h"
 #include "macros.h"
 #include "heap_bitmap.h"
 #include "object.h"
@@ -37,12 +38,26 @@
 class TimingLogger;
 class MarkStackChunk;
 
-class MarkSweep {
+class MarkSweep : public GarbageCollector {
  public:
-  explicit MarkSweep(ObjectStack* mark_stack);
+  explicit MarkSweep(Heap* heap, bool is_concurrent);
 
   ~MarkSweep();
 
+  virtual std::string GetName() const;
+  virtual void InitializePhase();
+  virtual bool IsConcurrent() const;
+  virtual bool HandleDirtyObjectsPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual void MarkingPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual void ReclaimPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual void FinishPhase();
+  virtual void MarkReachableObjects()
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  virtual GcType GetGcType() const {
+    return kGcTypeFull;
+  }
+
   // Initializes internal structures.
   void Init();
 
@@ -61,19 +76,24 @@
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   void MarkRootsCheckpoint();
-       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Verify that image roots point to only marked objects within the alloc space.
   void VerifyImageRoots() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Builds a mark stack and recursively mark until it empties.
-  void RecursiveMark(bool partial, TimingLogger& timings)
+  void RecursiveMark()
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Copies mark bits from live bitmap of ZygoteSpace to mark bitmap for partial GCs.
-  void CopyMarkBits(ContinuousSpace* space);
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  // Make a space immune; immune spaces are assumed to have all live objects marked.
+  void ImmuneSpace(ContinuousSpace* space)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Bind the live bits to the mark bits of bitmaps based on the gc type.
+  virtual void BindBitmaps()
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void BindLiveToMarkBitmap(ContinuousSpace* space)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -86,32 +106,16 @@
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Recursive mark objects on specified cards. Updates finger.
-  void RecursiveMarkCards(CardTable* card_table, const std::vector<byte*>& cards,
-                          TimingLogger& timings)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);;
-
   // Remarks the root set after completing the concurrent mark.
   void ReMarkRoots()
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  Heap* GetHeap() {
-    return heap_;
-  }
-
-  void ProcessReferences(bool clear_soft_references)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    ProcessReferences(&soft_reference_list_, clear_soft_references,
-                      &weak_reference_list_,
-                      &finalizer_reference_list_,
-                      &phantom_reference_list_);
-  }
+  void ProcessReferences(Thread* self)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Sweeps unmarked objects to complete the garbage collection.
-  void Sweep(TimingLogger& timings, bool partial, bool swap_bitmaps)
+  virtual void Sweep(TimingLogger& timings, bool swap_bitmaps)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Sweeps unmarked objects to complete the garbage collection.
@@ -122,6 +126,10 @@
   void SweepArray(TimingLogger& logger, ObjectStack* allocation_stack_, bool swap_bitmaps)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
+  // Swap bitmaps (if we are a full GC then we swap the zygote bitmap too).
+  virtual void SwapBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  void SwapLargeObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
   Object* GetClearedReferences() {
     return cleared_reference_list_;
   }
@@ -187,12 +195,25 @@
     return freed_objects_;
   }
 
-  // Everything inside the immune range is marked.
-  void SetImmuneRange(Object* begin, Object* end) {
-    immune_begin_ = begin;
-    immune_end_ = end;
+  uint64_t GetTotalTime() const {
+    return total_time_;
   }
 
+  uint64_t GetTotalPausedTime() const {
+    return total_paused_time_;
+  }
+
+  uint64_t GetTotalFreedObjects() const {
+    return total_freed_objects_;
+  }
+
+  uint64_t GetTotalFreedBytes() const {
+    return total_freed_bytes_;
+  }
+
+  // Everything inside the immune range is assumed to be marked.
+  void SetImmuneRange(Object* begin, Object* end);
+
   void SweepSystemWeaks()
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
@@ -235,17 +256,26 @@
   }
 
   static void MarkObjectCallback(const Object* root, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   static void MarkRootParallelCallback(const Object* root, void* arg);
 
   // Marks an object.
   void MarkObject(const Object* obj)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  void MarkRoot(const Object* obj)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   Barrier& GetBarrier();
+  const TimingLogger& GetTimings() const;
+  const CumulativeLogger& GetCumulativeTimings() const;
+  void ResetCumulativeStatistics();
 
- private:
+ protected:
   // Returns true if the object has its bit set in the mark bitmap.
   bool IsMarked(const Object* object) const;
 
@@ -256,6 +286,7 @@
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   static void ReMarkObjectVisitor(const Object* root, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   static void VerifyImageRootVisitor(Object* root, void* arg)
@@ -263,6 +294,7 @@
                             Locks::mutator_lock_);
 
   void MarkObjectNonNull(const Object* obj, bool check_finger)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   void MarkObjectNonNullParallel(const Object* obj, bool check_finger);
@@ -433,8 +465,6 @@
 
   ObjectStack* mark_stack_;
 
-  Heap* heap_;
-
   Object* finger_;
 
   // Immune range, every object inside the immune range is assumed to be marked.
@@ -460,14 +490,26 @@
   AtomicInteger work_chunks_deleted_;
   AtomicInteger reference_count_;
 
+  // Cumulative statistics.
+  uint64_t total_time_;
+  uint64_t total_paused_time_;
+  uint64_t total_freed_objects_;
+  uint64_t total_freed_bytes_;
+
   UniquePtr<Barrier> gc_barrier_;
   Mutex large_object_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   Mutex mark_stack_expand_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  TimingLogger timings_;
+  CumulativeLogger cumulative_timings_;
+
+  bool is_concurrent_;
+  bool clear_soft_references_;
 
   friend class AddIfReachesAllocSpaceVisitor; // Used by mod-union table.
   friend class CheckBitmapVisitor;
   friend class CheckObjectVisitor;
   friend class CheckReferenceVisitor;
+  friend class Heap;
   friend class InternTableEntryIsUnmarked;
   friend class MarkIfReachesAllocspaceVisitor;
   friend class ModUnionCheckReferences;
diff --git a/src/gc/mod_union_table.cc b/src/gc/mod_union_table.cc
index 5dd61e7..4d9ffe2 100644
--- a/src/gc/mod_union_table.cc
+++ b/src/gc/mod_union_table.cc
@@ -256,6 +256,7 @@
   }
 
   // Extra parameters are required since we use this same visitor signature for checking objects.
+  // TODO: Fix this when annotalysis works with visitors.
   void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
                      bool /* is_static */) const
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
@@ -290,9 +291,11 @@
       references_(references) {
   }
 
-  void operator ()(const Object* obj) const
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+  void operator ()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
     DCHECK(obj != NULL);
+    if (kDebugLocking) {
+      Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
+    }
     CheckReferenceVisitor visitor(mod_union_table_, references_);
     MarkSweep::VisitObjectReferences(obj, visitor);
   }
@@ -306,7 +309,8 @@
   // Start by checking that everything in the mod union table is marked.
   Heap* heap = GetHeap();
   for (ReferenceMap::const_iterator it = references_.begin(); it != references_.end(); ++it) {
-    for (ReferenceArray::const_iterator it_ref = it->second.begin(); it_ref != it->second.end(); ++it_ref ) {
+    for (ReferenceArray::const_iterator it_ref = it->second.begin(); it_ref != it->second.end();
+         ++it_ref) {
       DCHECK(heap->GetLiveBitmap()->Test(*it_ref));
     }
   }
@@ -368,7 +372,7 @@
   size_t count = 0;
   for (ReferenceMap::const_iterator it = references_.begin(); it != references_.end(); ++it) {
     for (ReferenceArray::const_iterator it_ref = it->second.begin(); it_ref != it->second.end(); ++it_ref ) {
-      mark_sweep->MarkObject(*it_ref);
+      mark_sweep->MarkRoot(*it_ref);
       ++count;
     }
   }
diff --git a/src/gc/mod_union_table.h b/src/gc/mod_union_table.h
index 84592a4..f3da41c 100644
--- a/src/gc/mod_union_table.h
+++ b/src/gc/mod_union_table.h
@@ -109,7 +109,9 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Mark all references to the alloc space(s).
-  void MarkReferences(MarkSweep* mark_sweep) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  void MarkReferences(MarkSweep* mark_sweep)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Exclusive lock is required since verify uses SpaceBitmap::VisitMarkedRange and
   // VisitMarkedRange can't know if the callback will modify the bitmap or not.
diff --git a/src/gc/partial_mark_sweep.cc b/src/gc/partial_mark_sweep.cc
new file mode 100644
index 0000000..64f09ff
--- /dev/null
+++ b/src/gc/partial_mark_sweep.cc
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "large_object_space.h"
+#include "partial_mark_sweep.h"
+#include "space.h"
+
+namespace art {
+  PartialMarkSweep::PartialMarkSweep(Heap* heap, bool is_concurrent)
+      : MarkSweep(heap, is_concurrent) {
+    cumulative_timings_.SetName(GetName());
+  }
+
+  PartialMarkSweep::~PartialMarkSweep() {
+  }
+
+  void PartialMarkSweep::BindBitmaps() {
+    MarkSweep::BindBitmaps();
+
+    Spaces& spaces = GetHeap()->GetSpaces();
+    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+    // For partial GCs we need to bind the bitmap of the zygote space so that all objects in the
+    // zygote space are viewed as marked.
+    for (Spaces::iterator it = spaces.begin(); it != spaces.end(); ++it) {
+      ContinuousSpace* space = *it;
+      if (space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect) {
+        ImmuneSpace(space);
+      }
+    }
+  }
+}  // namespace art
diff --git a/src/gc/partial_mark_sweep.h b/src/gc/partial_mark_sweep.h
new file mode 100644
index 0000000..80a1563
--- /dev/null
+++ b/src/gc/partial_mark_sweep.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_PARTIAL_MARK_SWEEP_H_
+#define ART_SRC_PARTIAL_MARK_SWEEP_H_
+
+#include "locks.h"
+#include "mark_sweep.h"
+#include "utils.h"
+
+namespace art {
+
+class Barrier;
+class CheckObjectVisitor;
+class Class;
+class Heap;
+class MarkIfReachesAllocspaceVisitor;
+class ModUnionClearCardVisitor;
+class ModUnionVisitor;
+class ModUnionTableBitmap;
+class Object;
+class TimingLogger;
+class MarkStackChunk;
+
+class PartialMarkSweep : public MarkSweep {
+ public:
+  virtual GcType GetGcType() const {
+    return kGcTypePartial;
+  }
+
+  explicit PartialMarkSweep(Heap* heap, bool is_concurrent);
+  ~PartialMarkSweep();
+
+ protected:
+  virtual void BindBitmaps()
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  DISALLOW_COPY_AND_ASSIGN(PartialMarkSweep);
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_PARTIAL_MARK_SWEEP_H_
diff --git a/src/gc/sticky_mark_sweep.cc b/src/gc/sticky_mark_sweep.cc
new file mode 100644
index 0000000..23196fd
--- /dev/null
+++ b/src/gc/sticky_mark_sweep.cc
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "large_object_space.h"
+#include "space.h"
+#include "sticky_mark_sweep.h"
+
+namespace art {
+  StickyMarkSweep::StickyMarkSweep(Heap* heap, bool is_concurrent)
+      : PartialMarkSweep(heap, is_concurrent) {
+    cumulative_timings_.SetName(GetName());
+  }
+
+  StickyMarkSweep::~StickyMarkSweep() {
+  }
+
+  void StickyMarkSweep::BindBitmaps() {
+    PartialMarkSweep::BindBitmaps();
+
+    Spaces& spaces = GetHeap()->GetSpaces();
+    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+    // For sticky GC, we want to bind the bitmaps of both the zygote space and the alloc space.
+    // This lets us start with the mark bitmap of the previous garbage collection as the current
+    // mark bitmap of the alloc space. After the sticky GC finishes, we then unbind the bitmaps,
+    // so that the live bitmap of the alloc space contains the newly marked objects from the
+    // sticky GC.
+    for (Spaces::iterator it = spaces.begin(); it != spaces.end(); ++it) {
+      if ((*it)->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect) {
+        BindLiveToMarkBitmap(*it);
+      }
+    }
+
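+    // The large object space tracks liveness with SpaceSetMaps rather than
+    // bitmaps, so copy its live set into the mark set instead of binding.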
+    GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();
+  }
+
+  void StickyMarkSweep::MarkReachableObjects() {
+    DisableFinger();
+    RecursiveMarkDirtyObjects(CardTable::kCardDirty - 1);
+  }
+
+  void StickyMarkSweep::Sweep(TimingLogger& timings, bool swap_bitmaps) {
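+    // A sticky GC only frees objects allocated since the previous collection;
+    // those all sit on the live stack, so sweeping that array is sufficient
+    // and we can skip walking the mark bitmaps.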
+    ObjectStack* live_stack = GetHeap()->GetLiveStack();
+    SweepArray(timings_, live_stack, false);
+    timings_.AddSplit("SweepArray");
+  }
+}  // namespace art
diff --git a/src/gc/sticky_mark_sweep.h b/src/gc/sticky_mark_sweep.h
new file mode 100644
index 0000000..9c3b6a4
--- /dev/null
+++ b/src/gc/sticky_mark_sweep.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_STICKY_MARK_SWEEP_H_
+#define ART_SRC_STICKY_MARK_SWEEP_H_
+
+#include "locks.h"
+#include "macros.h"
+#include "partial_mark_sweep.h"
+#include "utils.h"
+
+namespace art {
+
+class Barrier;
+class CheckObjectVisitor;
+class Class;
+class Heap;
+class MarkIfReachesAllocspaceVisitor;
+class ModUnionClearCardVisitor;
+class ModUnionVisitor;
+class ModUnionTableBitmap;
+class Object;
+class TimingLogger;
+class MarkStackChunk;
+
+class StickyMarkSweep : public PartialMarkSweep {
+ public:
+  virtual GcType GetGcType() const {
+    return kGcTypeSticky;
+  }
+
+  explicit StickyMarkSweep(Heap* heap, bool is_concurrent);
+  ~StickyMarkSweep();
+
+ protected:
+  virtual void BindBitmaps()
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  virtual void MarkReachableObjects()
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  virtual void Sweep(TimingLogger& timings, bool swap_bitmaps)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  DISALLOW_COPY_AND_ASSIGN(StickyMarkSweep);
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_STICKY_MARK_SWEEP_H_