Refactor and remove copy mark bits.

Refactor GC-related code into a gc folder.

Remove copying of mark bits by swapping bitmap pointers instead.

Enable concurrent sweeping of system weaks.

Fix the non-concurrent GC plan.

Change-Id: I9c71478be27d21a75f8a4e6af6faabe896e5e263
diff --git a/src/gc/atomic_stack.h b/src/gc/atomic_stack.h
new file mode 100644
index 0000000..3494861
--- /dev/null
+++ b/src/gc/atomic_stack.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_ATOMIC_STACK_H_
+#define ART_SRC_ATOMIC_STACK_H_
+
+#include <string>
+
+#include "atomic_integer.h"
+#include "UniquePtr.h"
+#include "logging.h"
+#include "macros.h"
+#include "mem_map.h"
+#include "utils.h"
+
+namespace art {
+
+template <typename T>
+class AtomicStack {
+ public:
+  // Capacity is how many elements we can store in the stack.
+  static AtomicStack* Create(const std::string& name, size_t capacity) {
+    UniquePtr<AtomicStack> mark_stack(new AtomicStack(name, capacity));
+    mark_stack->Init();
+    return mark_stack.release();
+  }
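+
+  // Usage sketch (hypothetical name and capacity): the stack owns an anonymous mmap of
+  // capacity * sizeof(T) bytes, e.g.
+  //   UniquePtr<AtomicStack<Object*> > stack(AtomicStack<Object*>::Create("mark stack", 1000));
+  //   if (!stack->AtomicPushBack(obj)) {
+  //     // Stack overflowed; a caller might Resize() to a larger capacity and retry.
+  //   }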
+
+  ~AtomicStack() {}
+
+  void Reset() {
+    DCHECK(mem_map_.get() != NULL);
+    DCHECK(begin_ != NULL);
+    front_index_ = 0;
+    back_index_ = 0;
+    int result = madvise(begin_, sizeof(T) * capacity_, MADV_DONTNEED);
+    if (result == -1) {
+      PLOG(WARNING) << "madvise failed";
+    }
+  }
+
+  // Beware: Mixing atomic pushes and atomic pops can cause the ABA problem.
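+  // (Roughly: an AtomicPopBack can decrement back_index_ while a concurrent AtomicPushBack
+  // re-increments it to the same value, so the pusher may overwrite a slot the popper is still
+  // reading; this sketch of the race is why the two should not be mixed concurrently.)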
+
+  // Returns false if we overflowed the stack.
+  bool AtomicPushBack(const T& value) {
+    const int32_t index = back_index_++;
+    if (UNLIKELY(static_cast<size_t>(index) >= capacity_)) {
+      // Stack overflow.
+      back_index_--;
+      return false;
+    }
+    begin_[index] = value;
+    return true;
+  }
+
+  void PushBack(const T& value) {
+    int32_t index = back_index_;
+    DCHECK_LT(static_cast<size_t>(index), capacity_);
+    back_index_ = index + 1;
+    begin_[index] = value;
+  }
+
+  T PopBack() {
+    DCHECK_GT(back_index_, front_index_);
+    // Decrement the back index non atomically.
+    back_index_ = back_index_ - 1;
+    return begin_[back_index_];
+  }
+
+  T AtomicPopBack() {
+    // Decrement the back index atomically.
+    int back_index = back_index_--;
+    DCHECK_GT(back_index, front_index_);
+    return begin_[back_index - 1];
+  }
+
+  // Take an item from the front of the stack.
+  T PopFront() {
+    int32_t index = front_index_;
+    DCHECK_LT(index, back_index_.get());
+    front_index_ = front_index_ + 1;
+    return begin_[index];
+  }
+
+  bool IsEmpty() const {
+    return Size() == 0;
+  }
+
+  size_t Size() const {
+    DCHECK_LE(front_index_, back_index_);
+    return back_index_ - front_index_;
+  }
+
+  T* Begin() {
+    return begin_ + front_index_;
+  }
+
+  T* End() {
+    return begin_ + back_index_;
+  }
+
+  size_t Capacity() const {
+    return capacity_;
+  }
+
+  // Will clear the stack.
+  void Resize(size_t new_capacity) {
+    capacity_ = new_capacity;
+    Init();
+  }
+
+ private:
+  AtomicStack(const std::string& name, const size_t capacity)
+      : name_(name),
+        back_index_(0),
+        front_index_(0),
+        begin_(NULL),
+        capacity_(capacity) {}
+
+  // Size in number of elements.
+  void Init() {
+    mem_map_.reset(MemMap::MapAnonymous(name_.c_str(), NULL, capacity_ * sizeof(T), PROT_READ | PROT_WRITE));
+    if (mem_map_.get() == NULL) {
+      std::string maps;
+      ReadFileToString("/proc/self/maps", &maps);
+      LOG(FATAL) << "couldn't allocate mark stack\n" << maps;
+    }
+    byte* addr = mem_map_->Begin();
+    CHECK(addr != NULL);
+    begin_ = reinterpret_cast<T*>(addr);
+    Reset();
+  }
+
+  // Name of the mark stack.
+  std::string name_;
+
+  // Memory mapping of the atomic stack.
+  UniquePtr<MemMap> mem_map_;
+
+  // Back index (index after the last element pushed).
+  AtomicInteger back_index_;
+
+  // Front index, used for implementing PopFront.
+  AtomicInteger front_index_;
+
+  // Base of the atomic stack.
+  T* begin_;
+
+  // Maximum number of elements.
+  size_t capacity_;
+
+  DISALLOW_COPY_AND_ASSIGN(AtomicStack);
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_ATOMIC_STACK_H_
diff --git a/src/gc/card_table.cc b/src/gc/card_table.cc
new file mode 100644
index 0000000..5a1e9b5
--- /dev/null
+++ b/src/gc/card_table.cc
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "card_table.h"
+
+#include <dynamic_annotations.h>
+
+#include "heap.h"
+#include "heap_bitmap.h"
+#include "logging.h"
+#include "runtime.h"
+#include "space.h"
+#include "utils.h"
+
+namespace art {
+/*
+ * Maintain a card table from the write barrier. All writes of
+ * non-NULL values to heap addresses should go through an entry in
+ * WriteBarrier, and from there to here.
+ *
+ * The heap is divided into "cards" of GC_CARD_SIZE bytes, as
+ * determined by GC_CARD_SHIFT. The card table contains one byte of
+ * data per card, to be used by the GC. The value of the byte will be
+ * one of GC_CARD_CLEAN or GC_CARD_DIRTY.
+ *
+ * After any store of a non-NULL object pointer into a heap object,
+ * code is obliged to mark the card dirty. The setters in
+ * object.h [such as SetFieldObject] do this for you. The
+ * compiler also contains code to mark cards as dirty.
+ *
+ * The card table's base [the "biased card table"] gets set to a
+ * rather strange value.  In order to keep the JIT from having to
+ * fabricate or load GC_DIRTY_CARD to store into the card table,
+ * biased base is within the mmap allocation at a point where its low
+ * byte is equal to GC_DIRTY_CARD. See CardTable::Create for details.
+ */
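+
+/*
+ * Worked example (hypothetical addresses): with kCardShift == 7 each card covers 128 bytes, so a
+ * store into heap address 0x40001080 dirties the card byte at
+ *   biased_begin + (0x40001080 >> 7) == biased_begin + 0x800021.
+ * The write barrier can then mark a card by storing the low byte of the biased base itself,
+ * since the base is offset so that (biased_begin & 0xff) == kCardDirty.
+ */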
+
+CardTable* CardTable::Create(const byte* heap_begin, size_t heap_capacity) {
+  /* Set up the card table */
+  size_t capacity = heap_capacity / kCardSize;
+  /* Allocate an extra 256 bytes to allow fixed low-byte of base */
+  UniquePtr<MemMap> mem_map(MemMap::MapAnonymous("dalvik-card-table", NULL,
+                                                 capacity + 256, PROT_READ | PROT_WRITE));
+  if (mem_map.get() == NULL) {
+    std::string maps;
+    ReadFileToString("/proc/self/maps", &maps);
+    LOG(FATAL) << "couldn't allocate card table\n" << maps;
+  }
+  // All zeros is the correct initial value; all clean. Anonymous mmaps are initialized to zero, so
+  // we don't clear the card table, which avoids unnecessary pages being allocated.
+  COMPILE_ASSERT(kCardClean == 0, card_clean_must_be_0);
+
+  byte* cardtable_begin = mem_map->Begin();
+  CHECK(cardtable_begin != NULL);
+
+  // We allocated up to 256 bytes of extra space to allow the low byte of biased_begin to equal
+  // GC_CARD_DIRTY; compute an offset value to make this the case.
+  size_t offset = 0;
+  byte* biased_begin = reinterpret_cast<byte*>(reinterpret_cast<uintptr_t>(cardtable_begin) -
+      (reinterpret_cast<uintptr_t>(heap_begin) >> kCardShift));
+  if (((uintptr_t)biased_begin & 0xff) != kCardDirty) {
+    int delta = kCardDirty - (reinterpret_cast<int>(biased_begin) & 0xff);
+    offset = delta + (delta < 0 ? 0x100 : 0);
+    biased_begin += offset;
+  }
+  CHECK_EQ(reinterpret_cast<int>(biased_begin) & 0xff, kCardDirty);
+
+  return new CardTable(mem_map.release(), biased_begin, offset);
+}
+
+CardTable::CardTable(MemMap* mem_map, byte* biased_begin, size_t offset)
+    : mem_map_(mem_map), biased_begin_(biased_begin), offset_(offset) {
+  byte* __attribute__((unused)) begin = mem_map_->Begin() + offset_;
+  byte* __attribute__((unused)) end = mem_map_->End();
+  ANNOTATE_BENIGN_RACE_SIZED(begin, (end - begin), "writes to GC card table");
+}
+
+void CardTable::ClearSpaceCards(ContinuousSpace* space) {
+  // TODO: clear just the range of the table that has been modified
+  byte* card_start = CardFromAddr(space->Begin());
+  byte* card_end = CardFromAddr(space->End()); // Make sure to round up.
+  memset(reinterpret_cast<void*>(card_start), kCardClean, card_end - card_start);
+}
+
+void CardTable::ClearCardTable() {
+  // TODO: clear just the range of the table that has been modified
+  memset(mem_map_->Begin(), kCardClean, mem_map_->Size());
+}
+
+void CardTable::PreClearCards(ContinuousSpace* space, std::vector<byte*>& out_cards) {
+  byte* card_end = CardFromAddr(space->End());
+  for (byte* card_cur = CardFromAddr(space->Begin()); card_cur < card_end; ++card_cur) {
+    if (*card_cur == kCardDirty) {
+      out_cards.push_back(card_cur);
+      *card_cur = kCardClean;
+    }
+  }
+}
+
+void CardTable::GetDirtyCards(ContinuousSpace* space, std::vector<byte*>& out_cards) const {
+  byte* card_end = CardFromAddr(space->End());
+  for (byte* card_cur = CardFromAddr(space->Begin()); card_cur < card_end; ++card_cur) {
+    if (*card_cur == kCardDirty) {
+      out_cards.push_back(card_cur);
+    }
+  }
+}
+
+bool CardTable::AddrIsInCardTable(const void* addr) const {
+  return IsValidCard(biased_begin_ + ((uintptr_t)addr >> kCardShift));
+}
+
+void CardTable::CheckAddrIsInCardTable(const byte* addr) const {
+  byte* card_addr = biased_begin_ + ((uintptr_t)addr >> kCardShift);
+  byte* begin = mem_map_->Begin() + offset_;
+  byte* end = mem_map_->End();
+  CHECK(AddrIsInCardTable(addr))
+      << "Card table " << this
+      << " begin: " << reinterpret_cast<void*>(begin)
+      << " end: " << reinterpret_cast<void*>(end)
+      << " card_addr: " << reinterpret_cast<void*>(card_addr)
+      << " heap begin: " << AddrFromCard(begin)
+      << " heap end: " << AddrFromCard(end)
+      << " addr: " << reinterpret_cast<const void*>(addr);
+}
+
+void CardTable::VerifyCardTable() {
+  UNIMPLEMENTED(WARNING) << "Card table verification";
+}
+
+}  // namespace art
diff --git a/src/gc/card_table.h b/src/gc/card_table.h
new file mode 100644
index 0000000..2715f52
--- /dev/null
+++ b/src/gc/card_table.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_CARDTABLE_H_
+#define ART_SRC_GC_CARDTABLE_H_
+
+#include "globals.h"
+#include "logging.h"
+#include "mem_map.h"
+#include "space_bitmap.h"
+#include "UniquePtr.h"
+
+namespace art {
+
+class Heap;
+class ContinuousSpace;
+class SpaceBitmap;
+class Object;
+
+// Maintain a card table from the write barrier. All writes of
+// non-NULL values to heap addresses should go through an entry in
+// WriteBarrier, and from there to here.
+class CardTable {
+ public:
+  static const size_t kCardShift = 7;
+  static const size_t kCardSize = (1 << kCardShift);
+  static const uint8_t kCardClean = 0x0;
+  static const uint8_t kCardDirty = 0x70;
+
+  static CardTable* Create(const byte* heap_begin, size_t heap_capacity);
+
+  // Set the card associated with the given address to GC_CARD_DIRTY.
+  void MarkCard(const void *addr) {
+    byte* card_addr = CardFromAddr(addr);
+    *card_addr = kCardDirty;
+  }
+
+  // Is the object on a dirty card?
+  bool IsDirty(const Object* obj) const {
+    return *CardFromAddr(obj) == kCardDirty;
+  }
+
+  // Visit and clear cards within memory range, only visits dirty cards.
+  template <typename Visitor>
+  void VisitClear(const void* start, const void* end, const Visitor& visitor) {
+    byte* card_start = CardFromAddr(start);
+    byte* card_end = CardFromAddr(end);
+    for (byte* it = card_start; it != card_end; ++it) {
+      if (*it == kCardDirty) {
+        *it = kCardClean;
+        visitor(it);
+      }
+    }
+  }
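+
+  // Usage sketch for VisitClear (hypothetical visitor): since this predates C++11 lambdas, callers
+  // pass a small functor, e.g. one that records each cleared card for later re-scanning:
+  //   struct CollectClearedCards {
+  //     explicit CollectClearedCards(std::vector<byte*>* out) : out_(out) {}
+  //     void operator()(byte* card) const { out_->push_back(card); }
+  //     std::vector<byte*>* out_;
+  //   };
+  //   card_table->VisitClear(space->Begin(), space->End(), CollectClearedCards(&cleared));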
+
+  // Returns a value that, when added to a heap address >> GC_CARD_SHIFT, will address the
+  // appropriate card table byte. For convenience this value is cached in every Thread.
+  byte* GetBiasedBegin() const {
+    return biased_begin_;
+  }
+
+  // For every dirty card between begin and end invoke the visitor with the specified argument.
+  template <typename Visitor, typename FingerVisitor>
+  void Scan(SpaceBitmap* bitmap, byte* scan_begin, byte* scan_end,
+            const Visitor& visitor, const FingerVisitor& finger_visitor) const
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    DCHECK(bitmap->HasAddress(scan_begin));
+    DCHECK(bitmap->HasAddress(scan_end - 1));  // scan_end is the byte after the last byte we scan.
+    byte* card_cur = CardFromAddr(scan_begin);
+    byte* card_end = CardFromAddr(scan_end);
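+    // Process contiguous runs of dirty cards: memchr skips over clean cards quickly, then each run
+    // is translated back into a heap address range and walked via the mark bitmap.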
+    while (card_cur < card_end) {
+      // Find the first dirty card.
+      card_cur = reinterpret_cast<byte*>(memchr(card_cur, kCardDirty, card_end - card_cur));
+      if (card_cur == NULL) {
+        break;
+      }
+      byte* run_start = card_cur++;
+
+      while (card_cur < card_end && *card_cur == kCardDirty) {
+        card_cur++;
+      }
+      byte* run_end = card_cur;
+
+      uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(run_start));
+      uintptr_t end = reinterpret_cast<uintptr_t>(AddrFromCard(run_end));
+      bitmap->VisitMarkedRange(start, end, visitor, finger_visitor);
+    }
+  }
+
+  // Assertion used to check the given address is covered by the card table
+  void CheckAddrIsInCardTable(const byte* addr) const;
+
+  // Resets all of the bytes in the card table to clean.
+  void ClearCardTable();
+
+  // Resets all of the bytes in the card table which do not map to the image space.
+  void ClearSpaceCards(ContinuousSpace* space);
+
+  // Clean all the cards which map to a space.
+  void PreClearCards(ContinuousSpace* space, std::vector<byte*>& out_cards);
+
+  // Returns all of the dirty cards which map to a space.
+  void GetDirtyCards(ContinuousSpace* space, std::vector<byte*>& out_cards) const;
+
+  // Returns the first address in the heap which maps to this card.
+  void* AddrFromCard(const byte *card_addr) const {
+    DCHECK(IsValidCard(card_addr))
+      << " card_addr: " << reinterpret_cast<const void*>(card_addr)
+      << " begin: " << reinterpret_cast<void*>(mem_map_->Begin() + offset_)
+      << " end: " << reinterpret_cast<void*>(mem_map_->End());
+    uintptr_t offset = card_addr - biased_begin_;
+    return reinterpret_cast<void*>(offset << kCardShift);
+  }
+
+  // Returns the address of the relevant byte in the card table, given an address on the heap.
+  byte* CardFromAddr(const void *addr) const {
+    byte *card_addr = biased_begin_ + ((uintptr_t)addr >> kCardShift);
+    // Sanity check the caller was asking for address covered by the card table
+    DCHECK(IsValidCard(card_addr)) << "addr: " << addr
+        << " card_addr: " << reinterpret_cast<void*>(card_addr);
+    return card_addr;
+  }
+
+  bool AddrIsInCardTable(const void* addr) const;
+
+ private:
+  CardTable(MemMap* begin, byte* biased_begin, size_t offset);
+
+  // Returns true iff the card table address is within the bounds of the card table.
+  bool IsValidCard(const byte* card_addr) const {
+    byte* begin = mem_map_->Begin() + offset_;
+    byte* end = mem_map_->End();
+    return card_addr >= begin && card_addr < end;
+  }
+
+  // Verifies that all gray objects are on a dirty card.
+  void VerifyCardTable();
+
+  // Mmapped pages for the card table
+  UniquePtr<MemMap> mem_map_;
+  // Value used to compute card table addresses from object addresses, see GetBiasedBegin
+  byte* const biased_begin_;
+  // Card table doesn't begin at the beginning of the mem_map_, instead it is displaced by offset
+  // to allow the byte value of biased_begin_ to equal GC_CARD_DIRTY
+  const size_t offset_;
+};
+
+}  // namespace art
+#endif  // ART_SRC_GC_CARDTABLE_H_
diff --git a/src/gc/heap_bitmap.cc b/src/gc/heap_bitmap.cc
new file mode 100644
index 0000000..cef6884
--- /dev/null
+++ b/src/gc/heap_bitmap.cc
@@ -0,0 +1,49 @@
+#include "heap_bitmap.h"
+#include "space.h"
+
+namespace art {
+
+void HeapBitmap::ReplaceBitmap(SpaceBitmap* old_bitmap, SpaceBitmap* new_bitmap) {
+  // TODO: C++0x auto
+  for (Bitmaps::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) {
+    if (*it == old_bitmap) {
+      *it = new_bitmap;
+      return;
+    }
+  }
+  LOG(FATAL) << "bitmap " << static_cast<const void*>(old_bitmap) << " not found";
+}
+
+void HeapBitmap::AddSpaceBitmap(SpaceBitmap* bitmap) {
+  DCHECK(bitmap != NULL);
+
+  // Check for interval overlap.
+  for (Bitmaps::const_iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) {
+    SpaceBitmap* cur_bitmap = *it;
+    if (bitmap->HeapBegin() < cur_bitmap->HeapLimit() &&
+        bitmap->HeapLimit() > cur_bitmap->HeapBegin()) {
+      LOG(FATAL) << "Overlapping space bitmaps added to heap bitmap!";
+    }
+  }
+  bitmaps_.push_back(bitmap);
+}
+
+void HeapBitmap::SetLargeObjects(SpaceSetMap* large_objects) {
+  DCHECK(large_objects != NULL);
+  large_objects_ = large_objects;
+}
+
+HeapBitmap::HeapBitmap(Heap* heap) : heap_(heap), large_objects_(NULL) {}
+
+void HeapBitmap::Walk(SpaceBitmap::Callback* callback, void* arg) {
+  // TODO: C++0x auto
+  for (Bitmaps::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) {
+    (*it)->Walk(callback, arg);
+  }
+
+  large_objects_->Walk(callback, arg);
+}
+
+}  // namespace art
diff --git a/src/gc/heap_bitmap.h b/src/gc/heap_bitmap.h
new file mode 100644
index 0000000..23dcd47
--- /dev/null
+++ b/src/gc/heap_bitmap.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_HEAP_BITMAP_H_
+#define ART_SRC_HEAP_BITMAP_H_
+
+#include "space_bitmap.h"
+
+namespace art {
+  class Heap;
+  class SpaceBitmap;
+
+  class HeapBitmap {
+   public:
+    bool Test(const Object* obj)
+        SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+      SpaceBitmap* bitmap = GetSpaceBitmap(obj);
+      if (LIKELY(bitmap != NULL)) {
+        return bitmap->Test(obj);
+      } else {
+        return large_objects_->Test(obj);
+      }
+    }
+
+    void Clear(const Object* obj)
+        EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+      SpaceBitmap* bitmap = GetSpaceBitmap(obj);
+      if (LIKELY(bitmap != NULL)) {
+        return bitmap->Clear(obj);
+      } else {
+        return large_objects_->Clear(obj);
+      }
+    }
+
+    void Set(const Object* obj)
+        EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+      SpaceBitmap* bitmap = GetSpaceBitmap(obj);
+      if (LIKELY(bitmap != NULL)) {
+        bitmap->Set(obj);
+      } else {
+        large_objects_->Set(obj);
+      }
+    }
+
+    SpaceBitmap* GetSpaceBitmap(const Object* obj) {
+      // TODO: C++0x auto
+      for (Bitmaps::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) {
+        if ((*it)->HasAddress(obj)) {
+          return *it;
+        }
+      }
+      return NULL;
+    }
+
+    void Walk(SpaceBitmap::Callback* callback, void* arg)
+        SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+    template <typename Visitor>
+    void Visit(const Visitor& visitor)
+        EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+      // TODO: C++0x auto
+      for (Bitmaps::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) {
+        SpaceBitmap* bitmap = *it;
+        bitmap->VisitMarkedRange(bitmap->HeapBegin(), bitmap->HeapLimit(), visitor,
+                                 IdentityFunctor());
+      }
+      large_objects_->Visit(visitor);
+    }
+
+    // Find and replace a bitmap pointer; this is used for the bitmap swapping in the GC.
+    void ReplaceBitmap(SpaceBitmap* old_bitmap, SpaceBitmap* new_bitmap)
+        EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+    explicit HeapBitmap(Heap* heap);
+
+    inline SpaceSetMap* GetLargeObjects() const {
+      return large_objects_;
+    }
+
+    void SetLargeObjects(SpaceSetMap* large_objects);
+
+   private:
+
+    const Heap* const heap_;
+
+    void AddSpaceBitmap(SpaceBitmap* bitmap);
+
+    typedef std::vector<SpaceBitmap*> Bitmaps;
+    Bitmaps bitmaps_;
+
+    // Large object sets.
+    SpaceSetMap* large_objects_;
+
+    friend class Heap;
+  };
+}  // namespace art
+
+#endif  // ART_SRC_HEAP_BITMAP_H_
diff --git a/src/gc/mark_sweep.cc b/src/gc/mark_sweep.cc
new file mode 100644
index 0000000..b82bc6e
--- /dev/null
+++ b/src/gc/mark_sweep.cc
@@ -0,0 +1,1056 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mark_sweep.h"
+
+#include <climits>
+#include <vector>
+
+#include "card_table.h"
+#include "class_loader.h"
+#include "dex_cache.h"
+#include "heap.h"
+#include "indirect_reference_table.h"
+#include "intern_table.h"
+#include "jni_internal.h"
+#include "logging.h"
+#include "macros.h"
+#include "monitor.h"
+#include "object.h"
+#include "runtime.h"
+#include "space.h"
+#include "timing_logger.h"
+#include "thread.h"
+
+namespace art {
+
+static const bool kUseMarkStackPrefetch = true;
+
+class SetFingerVisitor {
+ public:
+  SetFingerVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}
+
+  void operator ()(void* finger) const {
+    mark_sweep_->SetFinger(reinterpret_cast<Object*>(finger));
+  }
+
+ private:
+  MarkSweep* const mark_sweep_;
+};
+
+MarkSweep::MarkSweep(ObjectStack* mark_stack)
+    : current_mark_bitmap_(NULL),
+      mark_stack_(mark_stack),
+      heap_(NULL),
+      finger_(NULL),
+      immune_begin_(NULL),
+      immune_end_(NULL),
+      soft_reference_list_(NULL),
+      weak_reference_list_(NULL),
+      finalizer_reference_list_(NULL),
+      phantom_reference_list_(NULL),
+      cleared_reference_list_(NULL),
+      freed_bytes_(0), freed_objects_(0),
+      class_count_(0), array_count_(0), other_count_(0) {
+  DCHECK(mark_stack_ != NULL);
+}
+
+void MarkSweep::Init() {
+  heap_ = Runtime::Current()->GetHeap();
+  mark_stack_->Reset();
+  // TODO: C++0x auto
+  FindDefaultMarkBitmap();
+  // TODO: if concurrent, enable card marking in compiler
+  // TODO: check that the mark bitmap is entirely clear.
+}
+
+void MarkSweep::FindDefaultMarkBitmap() {
+  const Spaces& spaces = heap_->GetSpaces();
+  for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
+    if ((*it)->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect) {
+      current_mark_bitmap_ = (*it)->GetMarkBitmap();
+      CHECK(current_mark_bitmap_ != NULL);
+      return;
+    }
+  }
+  GetHeap()->DumpSpaces();
+  LOG(FATAL) << "Could not find a default mark bitmap";
+}
+
+inline void MarkSweep::MarkObject0(const Object* obj, bool check_finger) {
+  DCHECK(obj != NULL);
+
+  if (obj >= immune_begin_ && obj < immune_end_) {
+    DCHECK(IsMarked(obj));
+    return;
+  }
+
+  // Try to take advantage of locality of references within a space, failing this find the space
+  // the hard way.
+  if (UNLIKELY(!current_mark_bitmap_->HasAddress(obj))) {
+    SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetSpaceBitmap(obj);
+    if (new_bitmap != NULL) {
+      current_mark_bitmap_ = new_bitmap;
+    } else {
+      LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
+      SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
+      if (!large_objects->Test(obj)) {
+        CHECK(large_object_space->Contains(obj)) << "Attempting to mark object " << obj
+                                                  << " not in large object space";
+        large_objects->Set(obj);
+        // Don't need to check finger since large objects never have any object references.
+      }
+      // TODO: Improve clarity of control flow in this function?
+      return;
+    }
+  }
+
+  // This object was not previously marked.
+  if (!current_mark_bitmap_->Test(obj)) {
+    current_mark_bitmap_->Set(obj);
+    if (check_finger && obj < finger_) {
+      // Do we need to expand the mark stack?
+      if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
+        std::vector<Object*> temp;
+        temp.insert(temp.begin(), mark_stack_->Begin(), mark_stack_->End());
+        mark_stack_->Resize(mark_stack_->Capacity() * 2);
+        for (size_t i = 0; i < temp.size(); ++i) {
+          mark_stack_->PushBack(temp[i]);
+        }
+      }
+      // The object must be pushed on to the mark stack.
+      mark_stack_->PushBack(const_cast<Object*>(obj));
+    }
+  }
+}
+
+// Used to mark objects when recursing.  Recursion is done by moving
+// the finger across the bitmaps in address order and marking child
+// objects.  Any newly-marked objects whose addresses are lower than
+// the finger won't be visited by the bitmap scan, so those objects
+// need to be added to the mark stack.
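+// For example, with the finger at 0x5000, a newly marked child at 0x4000 has already been passed
+// by the bitmap scan and must be pushed on the mark stack, while a child at 0x6000 will still be
+// visited when the scan reaches it (addresses are illustrative only).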
+void MarkSweep::MarkObject(const Object* obj) {
+  if (obj != NULL) {
+    MarkObject0(obj, true);
+  }
+}
+
+void MarkSweep::MarkObjectVisitor(const Object* root, void* arg) {
+  DCHECK(root != NULL);
+  DCHECK(arg != NULL);
+  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
+  mark_sweep->MarkObject0(root, false);
+}
+
+void MarkSweep::ReMarkObjectVisitor(const Object* root, void* arg) {
+  DCHECK(root != NULL);
+  DCHECK(arg != NULL);
+  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
+  mark_sweep->MarkObject0(root, true);
+}
+
+// Marks all objects in the root set.
+void MarkSweep::MarkRoots() {
+  Runtime::Current()->VisitRoots(MarkObjectVisitor, this);
+}
+
+class CheckObjectVisitor {
+ public:
+  CheckObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}
+
+  void operator ()(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) const
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
+                            Locks::mutator_lock_) {
+    mark_sweep_->CheckReference(obj, ref, offset, is_static);
+  }
+
+ private:
+  MarkSweep* const mark_sweep_;
+};
+
+void MarkSweep::CheckObject(const Object* obj) {
+  DCHECK(obj != NULL);
+  CheckObjectVisitor visitor(this);
+  VisitObjectReferences(obj, visitor);
+}
+
+void MarkSweep::VerifyImageRootVisitor(Object* root, void* arg) {
+  DCHECK(root != NULL);
+  DCHECK(arg != NULL);
+  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
+  DCHECK(mark_sweep->heap_->GetMarkBitmap()->Test(root));
+  mark_sweep->CheckObject(root);
+}
+
+void MarkSweep::CopyMarkBits(ContinuousSpace* space) {
+  SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+  SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
+  mark_bitmap->CopyFrom(live_bitmap);
+}
+
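+// Instead of copying mark bits (see CopyMarkBits above), a space that is not collected in this GC
+// can have its live bitmap temporarily stand in as its mark bitmap, so everything live tests as
+// marked; the original mark bitmap is parked in temp_bitmap_ and restored later by UnBindBitmaps().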
+void MarkSweep::BindLiveToMarkBitmap(ContinuousSpace* space) {
+  CHECK(space->IsAllocSpace());
+  AllocSpace* alloc_space = space->AsAllocSpace();
+  SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+  SpaceBitmap* mark_bitmap = alloc_space->mark_bitmap_.release();
+  GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
+  alloc_space->temp_bitmap_.reset(mark_bitmap);
+  alloc_space->mark_bitmap_.reset(live_bitmap);
+}
+
+class ScanImageRootVisitor {
+ public:
+  ScanImageRootVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {
+  }
+
+  void operator ()(const Object* root) const
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    DCHECK(root != NULL);
+    mark_sweep_->ScanObject(root);
+  }
+
+ private:
+  MarkSweep* const mark_sweep_;
+};
+
+void MarkSweep::ScanGrayObjects(bool update_finger) {
+  const Spaces& spaces = heap_->GetSpaces();
+  CardTable* card_table = heap_->GetCardTable();
+  ScanImageRootVisitor image_root_visitor(this);
+  SetFingerVisitor finger_visitor(this);
+  // TODO: C++ 0x auto
+  for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
+    ContinuousSpace* space = *it;
+    byte* begin = space->Begin();
+    byte* end = space->End();
+    // Image spaces are handled properly since live == marked for them.
+    SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
+    if (update_finger) {
+      card_table->Scan(mark_bitmap, begin, end, image_root_visitor, finger_visitor);
+    } else {
+      card_table->Scan(mark_bitmap, begin, end, image_root_visitor, IdentityFunctor());
+    }
+  }
+}
+
+class CheckBitmapVisitor {
+ public:
+  CheckBitmapVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}
+
+  void operator ()(const Object* obj) const
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
+                            Locks::mutator_lock_) {
+    DCHECK(obj != NULL);
+    mark_sweep_->CheckObject(obj);
+  }
+
+ private:
+  MarkSweep* mark_sweep_;
+};
+
+void MarkSweep::VerifyImageRoots() {
+  // Verify roots ensures that all the references inside the image space point to objects which are
+  // either in the image space or marked objects in the alloc space.
+  CheckBitmapVisitor visitor(this);
+  const Spaces& spaces = heap_->GetSpaces();
+  for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
+    if ((*it)->IsImageSpace()) {
+      ImageSpace* space = (*it)->AsImageSpace();
+      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
+      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
+      SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+      DCHECK(live_bitmap != NULL);
+      live_bitmap->VisitMarkedRange(begin, end, visitor, IdentityFunctor());
+    }
+  }
+}
+
+class ScanObjectVisitor {
+ public:
+  ScanObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}
+
+  void operator ()(const Object* obj) const
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    mark_sweep_->ScanObject(obj);
+  }
+
+ private:
+  MarkSweep* const mark_sweep_;
+};
+
+// Populates the mark stack based on the set of marked objects and
+// recursively marks until the mark stack is emptied.
+void MarkSweep::RecursiveMark(bool partial, TimingLogger& timings) {
+  // RecursiveMark will build the lists of known instances of the Reference classes.
+  // See DelayReferenceReferent for details.
+  CHECK(soft_reference_list_ == NULL);
+  CHECK(weak_reference_list_ == NULL);
+  CHECK(finalizer_reference_list_ == NULL);
+  CHECK(phantom_reference_list_ == NULL);
+  CHECK(cleared_reference_list_ == NULL);
+
+  const Spaces& spaces = heap_->GetSpaces();
+
+  SetFingerVisitor set_finger_visitor(this);
+  ScanObjectVisitor scan_visitor(this);
+  for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
+    ContinuousSpace* space = *it;
+    if (space->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect ||
+        (!partial && space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect)) {
+      current_mark_bitmap_ = space->GetMarkBitmap();
+      if (current_mark_bitmap_ == NULL) {
+        GetHeap()->DumpSpaces();
+        LOG(FATAL) << "invalid bitmap";
+      }
+      // This function does not handle heap end increasing, so we must use the space end.
+      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
+      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
+      current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor, set_finger_visitor);
+    }
+  }
+  finger_ = reinterpret_cast<Object*>(~0);
+  timings.AddSplit("RecursiveMark");
+  // TODO: tune the frequency of emptying the mark stack
+  ProcessMarkStack();
+  timings.AddSplit("ProcessMarkStack");
+}
+
+void MarkSweep::RecursiveMarkCards(CardTable* card_table, const std::vector<byte*>& cards,
+                                   TimingLogger& timings) {
+  ScanImageRootVisitor image_root_visitor(this);
+  SetFingerVisitor finger_visitor(this);
+  const size_t card_count = cards.size();
+  SpaceBitmap* active_bitmap = NULL;
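+  // The cards are assumed to be sorted by address; consecutive dirty cards are coalesced into a
+  // single [begin, end) range so that each run costs only one VisitMarkedRange call.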
+  for (size_t i = 0; i < card_count;) {
+    Object* start_obj = reinterpret_cast<Object*>(card_table->AddrFromCard(cards[i]));
+    uintptr_t begin = reinterpret_cast<uintptr_t>(start_obj);
+    uintptr_t end = begin + CardTable::kCardSize;
+    for (++i; i < card_count && reinterpret_cast<uintptr_t>(cards[i]) == end; ++i) {
+      end += CardTable::kCardSize;
+    }
+    if (active_bitmap == NULL || !active_bitmap->HasAddress(start_obj)) {
+      active_bitmap = heap_->GetMarkBitmap()->GetSpaceBitmap(start_obj);
+#ifndef NDEBUG
+      if (active_bitmap == NULL) {
+        GetHeap()->DumpSpaces();
+        LOG(FATAL) << "Object " << reinterpret_cast<const void*>(start_obj);
+      }
+#endif
+    }
+    active_bitmap->VisitMarkedRange(begin, end, image_root_visitor, finger_visitor);
+  }
+  timings.AddSplit("RecursiveMarkCards");
+  ProcessMarkStack();
+  timings.AddSplit("ProcessMarkStack");
+}
+
+bool MarkSweep::IsMarkedCallback(const Object* object, void* arg) {
+  return
+      reinterpret_cast<MarkSweep*>(arg)->IsMarked(object) ||
+      !reinterpret_cast<MarkSweep*>(arg)->GetHeap()->GetLiveBitmap()->Test(object);
+}
+
+void MarkSweep::RecursiveMarkDirtyObjects(bool update_finger) {
+  ScanGrayObjects(update_finger);
+  ProcessMarkStack();
+}
+
+void MarkSweep::ReMarkRoots() {
+  Runtime::Current()->VisitRoots(ReMarkObjectVisitor, this);
+}
+
+void MarkSweep::SweepJniWeakGlobals(Heap::IsMarkedTester is_marked, void* arg) {
+  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
+  MutexLock mu(Thread::Current(), vm->weak_globals_lock);
+  IndirectReferenceTable* table = &vm->weak_globals;
+  typedef IndirectReferenceTable::iterator It;  // TODO: C++0x auto
+  for (It it = table->begin(), end = table->end(); it != end; ++it) {
+    const Object** entry = *it;
+    if (!is_marked(*entry, arg)) {
+      *entry = kClearedJniWeakGlobal;
+    }
+  }
+}
+
+struct ArrayMarkedCheck {
+  ObjectStack* live_stack;
+  MarkSweep* mark_sweep;
+};
+
+// Either marked or not live.
+bool MarkSweep::IsMarkedArrayCallback(const Object* object, void* arg) {
+  ArrayMarkedCheck* array_check = reinterpret_cast<ArrayMarkedCheck*>(arg);
+  if (array_check->mark_sweep->IsMarked(object)) {
+    return true;
+  }
+  ObjectStack* live_stack = array_check->live_stack;
+  return std::find(live_stack->Begin(), live_stack->End(), object) == live_stack->End();
+}
+
+void MarkSweep::SweepSystemWeaksArray(ObjectStack* allocations) {
+  Runtime* runtime = Runtime::Current();
+  // The sweep routines free an entry when the is_marked callback returns false, but we only want
+  // to free entries that are !IsMarked && IsLive. So the callback returns !(!IsMarked && IsLive),
+  // which is equal to (IsMarked || !IsLive), or (IsLive || !IsMarked) for swapped bitmaps.
+
+  ArrayMarkedCheck visitor;
+  visitor.live_stack = allocations;
+  visitor.mark_sweep = this;
+  runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedArrayCallback, &visitor);
+  runtime->GetMonitorList()->SweepMonitorList(IsMarkedArrayCallback, &visitor);
+  SweepJniWeakGlobals(IsMarkedArrayCallback, &visitor);
+}
+
+void MarkSweep::SweepSystemWeaks() {
+  Runtime* runtime = Runtime::Current();
+  // The sweep routines free an entry when the is_marked callback returns false, but we only want
+  // to free entries that are !IsMarked && IsLive. So the callback returns !(!IsMarked && IsLive),
+  // which is equal to (IsMarked || !IsLive), or (IsLive || !IsMarked) for swapped bitmaps.
+  runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedCallback, this);
+  runtime->GetMonitorList()->SweepMonitorList(IsMarkedCallback, this);
+  SweepJniWeakGlobals(IsMarkedCallback, this);
+}
+
+bool MarkSweep::VerifyIsLiveCallback(const Object* obj, void* arg) {
+  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
+  // We don't actually want to sweep the object, so let's return "marked".
+  return true;
+}
+
+void MarkSweep::VerifyIsLive(const Object* obj) {
+  Heap* heap = GetHeap();
+  if (!heap->GetLiveBitmap()->Test(obj)) {
+    LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
+    if (!large_object_space->GetLiveObjects()->Test(obj)) {
+      if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
+          heap->allocation_stack_->End()) {
+        // Object not found!
+        heap->DumpSpaces();
+        LOG(FATAL) << "Found dead object " << obj;
+      }
+    }
+  }
+}
+
+void MarkSweep::VerifySystemWeaks() {
+  Runtime* runtime = Runtime::Current();
+  // Verify system weaks, uses a special IsMarked callback which always returns true.
+  runtime->GetInternTable()->SweepInternTableWeaks(VerifyIsLiveCallback, this);
+  runtime->GetMonitorList()->SweepMonitorList(VerifyIsLiveCallback, this);
+
+  JavaVMExt* vm = runtime->GetJavaVM();
+  MutexLock mu(Thread::Current(), vm->weak_globals_lock);
+  IndirectReferenceTable* table = &vm->weak_globals;
+  typedef IndirectReferenceTable::iterator It;  // TODO: C++0x auto
+  for (It it = table->begin(), end = table->end(); it != end; ++it) {
+    const Object** entry = *it;
+    VerifyIsLive(*entry);
+  }
+}
+
+struct SweepCallbackContext {
+  MarkSweep* mark_sweep;
+  AllocSpace* space;
+  Thread* self;
+};
+
+void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
+  size_t freed_objects = num_ptrs;
+  size_t freed_bytes = 0;
+  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
+  MarkSweep* mark_sweep = context->mark_sweep;
+  Heap* heap = mark_sweep->GetHeap();
+  AllocSpace* space = context->space;
+  Thread* self = context->self;
+  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
+  // Use a bulk free that merges consecutive objects before freeing, or free per object?
+  // Documentation suggests better free performance with merging, but this may come at the expense
+  // of allocation performance.
+  // TODO: investigate performance
+  static const bool kUseFreeList = true;
+  if (kUseFreeList) {
+    for (size_t i = 0; i < num_ptrs; ++i) {
+      Object* obj = static_cast<Object*>(ptrs[i]);
+      freed_bytes += space->AllocationSize(obj);
+    }
+    // AllocSpace::FreeList clears the value in ptrs, so perform after clearing the live bit
+    space->FreeList(self, num_ptrs, ptrs);
+  } else {
+    for (size_t i = 0; i < num_ptrs; ++i) {
+      Object* obj = static_cast<Object*>(ptrs[i]);
+      freed_bytes += space->AllocationSize(obj);
+      space->Free(self, obj);
+    }
+  }
+
+  heap->RecordFree(freed_objects, freed_bytes);
+  mark_sweep->freed_objects_ += freed_objects;
+  mark_sweep->freed_bytes_ += freed_bytes;
+}
+
+void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
+  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
+  Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
+  Heap* heap = context->mark_sweep->GetHeap();
+  // We don't free any actual memory to avoid dirtying the shared zygote pages.
+  for (size_t i = 0; i < num_ptrs; ++i) {
+    Object* obj = static_cast<Object*>(ptrs[i]);
+    heap->GetLiveBitmap()->Clear(obj);
+    heap->GetCardTable()->MarkCard(obj);
+  }
+}
+
+void MarkSweep::SweepArray(TimingLogger& logger, ObjectStack* allocations, bool swap_bitmaps) {
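+  // Sweep based on the allocation stack: any object recorded there that is not in the (possibly
+  // swapped) mark bitmap or large object mark set is freed, avoiding a full bitmap walk.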
+  size_t freed_bytes = 0;
+  AllocSpace* space = heap_->GetAllocSpace();
+
+  // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark
+  // bitmap, resulting in occasional frees of Weaks which are still in use.
+  // TODO: Fix when sweeping weaks works properly with mutators unpaused + allocation list.
+  SweepSystemWeaksArray(allocations);
+  logger.AddSplit("SweepSystemWeaks");
+
+  // Newly allocated objects MUST be in the alloc space and those are the only objects which we are
+  // going to free.
+  SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+  SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
+  LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
+  SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
+  SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
+  if (swap_bitmaps) {
+    std::swap(live_bitmap, mark_bitmap);
+    std::swap(large_live_objects, large_mark_objects);
+  }
+
+  size_t freed_large_objects = 0;
+  size_t count = allocations->Size();
+  Object** objects = const_cast<Object**>(allocations->Begin());
+  Object** out = objects;
+
+  // Empty the allocation stack.
+  Thread* self = Thread::Current();
+  for (size_t i = 0; i < count; ++i) {
+    Object* obj = objects[i];
+    // There should only be objects in the AllocSpace/LargeObjectSpace in the allocation stack.
+    if (LIKELY(mark_bitmap->HasAddress(obj))) {
+      if (!mark_bitmap->Test(obj)) {
+        // Don't bother un-marking since we clear the mark bitmap anyways.
+        *(out++) = obj;
+        size_t size = space->AllocationSize(obj);
+        freed_bytes += size;
+      }
+    } else if (!large_mark_objects->Test(obj)) {
+      ++freed_large_objects;
+      size_t size = large_object_space->AllocationSize(obj);
+      freed_bytes += size;
+      large_object_space->Free(self, obj);
+    }
+  }
+  logger.AddSplit("Process allocation stack");
+
+  size_t freed_objects = out - objects;
+  VLOG(heap) << "Freed " << freed_objects << "/" << count
+             << " objects with size " << PrettySize(freed_bytes);
+  space->FreeList(self, freed_objects, objects);
+  heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes);
+  freed_objects_ += freed_objects;
+  freed_bytes_ += freed_bytes;
+  logger.AddSplit("FreeList");
+  allocations->Reset();
+  logger.AddSplit("Reset stack");
+}
+
+void MarkSweep::Sweep(bool partial, bool swap_bitmaps) {
+  DCHECK(mark_stack_->IsEmpty());
+
+  // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark
+  // bitmap, resulting in occasional frees of Weaks which are still in use.
+  SweepSystemWeaks();
+
+  const Spaces& spaces = heap_->GetSpaces();
+  SweepCallbackContext scc;
+  scc.mark_sweep = this;
+  scc.self = Thread::Current();
+  // TODO: C++0x auto
+  for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
+    ContinuousSpace* space = *it;
+    if (space->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect ||
+        (!partial && space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect)) {
+      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
+      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
+      scc.space = space->AsAllocSpace();
+      SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+      SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
+      if (swap_bitmaps) {
+        std::swap(live_bitmap, mark_bitmap);
+      }
+      if (space->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect) {
+        // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
+        SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
+                               &SweepCallback, reinterpret_cast<void*>(&scc));
+      } else {
+        // Zygote sweep takes care of dirtying cards and clearing live bits, does not free actual memory.
+        SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
+                               &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
+      }
+    }
+  }
+}
+
+void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
+  // Sweep large objects
+  LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
+  SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
+  SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
+  if (swap_bitmaps) {
+    std::swap(large_live_objects, large_mark_objects);
+  }
+  SpaceSetMap::Objects& live_objects = large_live_objects->GetObjects();
+  // O(n*log(n)) but hopefully there are not too many large objects.
+  size_t freed_objects = 0;
+  size_t freed_bytes = 0;
+  // TODO: C++0x
+  Thread* self = Thread::Current();
+  for (SpaceSetMap::Objects::iterator it = live_objects.begin(); it != live_objects.end(); ++it) {
+    if (!large_mark_objects->Test(*it)) {
+      freed_bytes += large_object_space->AllocationSize(*it);
+      large_object_space->Free(self, const_cast<Object*>(*it));
+      ++freed_objects;
+    }
+  }
+  freed_objects_ += freed_objects;
+  freed_bytes_ += freed_bytes;
+  // Large objects don't count towards bytes_allocated.
+  GetHeap()->RecordFree(freed_objects, freed_bytes);
+}
+
+// Scans instance fields.
+inline void MarkSweep::ScanInstanceFields(const Object* obj) {
+  DCHECK(obj != NULL);
+  Class* klass = obj->GetClass();
+  DCHECK(klass != NULL);
+  ScanFields(obj, klass->GetReferenceInstanceOffsets(), false);
+}
+
+// Scans static storage on a Class.
+inline void MarkSweep::ScanStaticFields(const Class* klass) {
+  DCHECK(klass != NULL);
+  ScanFields(klass, klass->GetReferenceStaticOffsets(), true);
+}
+
+inline void MarkSweep::ScanFields(const Object* obj, uint32_t ref_offsets, bool is_static) {
+  if (ref_offsets != CLASS_WALK_SUPER) {
+    // Found a reference offset bitmap.  Mark the specified offsets.
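+    // Each set bit encodes one field holding a reference: CLZ locates the highest set bit,
+    // CLASS_OFFSET_FROM_CLZ converts it to a byte offset within the object, and the XOR below
+    // clears that bit so every reference field is visited exactly once.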
+    while (ref_offsets != 0) {
+      const size_t right_shift = CLZ(ref_offsets);
+      MemberOffset byte_offset = CLASS_OFFSET_FROM_CLZ(right_shift);
+      const Object* ref = obj->GetFieldObject<const Object*>(byte_offset, false);
+      MarkObject(ref);
+      ref_offsets ^= CLASS_HIGH_BIT >> right_shift;
+    }
+  } else {
+    // There is no reference offset bitmap.  In the non-static case,
+    // walk up the class inheritance hierarchy and find reference
+    // offsets the hard way. In the static case, just consider this
+    // class.
+    for (const Class* klass = is_static ? obj->AsClass() : obj->GetClass();
+         klass != NULL;
+         klass = is_static ? NULL : klass->GetSuperClass()) {
+      size_t num_reference_fields = (is_static
+                                     ? klass->NumReferenceStaticFields()
+                                     : klass->NumReferenceInstanceFields());
+      for (size_t i = 0; i < num_reference_fields; ++i) {
+        Field* field = (is_static
+                        ? klass->GetStaticField(i)
+                        : klass->GetInstanceField(i));
+        MemberOffset field_offset = field->GetOffset();
+        const Object* ref = obj->GetFieldObject<const Object*>(field_offset, false);
+        MarkObject(ref);
+      }
+    }
+  }
+}
+
+void MarkSweep::CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) {
+  const Spaces& spaces = heap_->GetSpaces();
+  // TODO: C++0x auto
+  for (Spaces::const_iterator cur = spaces.begin(); cur != spaces.end(); ++cur) {
+    if ((*cur)->IsAllocSpace() && (*cur)->Contains(ref)) {
+      DCHECK(IsMarked(obj));
+
+      bool is_marked = IsMarked(ref);
+      if (!is_marked) {
+        LOG(INFO) << **cur;
+        LOG(WARNING) << (is_static ? "Static ref'" : "Instance ref'") << PrettyTypeOf(ref)
+                     << "' (" << reinterpret_cast<const void*>(ref) << ") in '" << PrettyTypeOf(obj)
+                     << "' (" << reinterpret_cast<const void*>(obj) << ") at offset "
+                     << reinterpret_cast<void*>(offset.Int32Value()) << " wasn't marked";
+
+        const Class* klass = is_static ? obj->AsClass() : obj->GetClass();
+        DCHECK(klass != NULL);
+        const ObjectArray<Field>* fields = is_static ? klass->GetSFields() : klass->GetIFields();
+        DCHECK(fields != NULL);
+        bool found = false;
+        for (int32_t i = 0; i < fields->GetLength(); ++i) {
+          const Field* cur = fields->Get(i);
+          if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
+            LOG(WARNING) << "Field referencing the alloc space was " << PrettyField(cur);
+            found = true;
+            break;
+          }
+        }
+        if (!found) {
+          LOG(WARNING) << "Could not find field in object alloc space with offset " << offset.Int32Value();
+        }
+
+        bool obj_marked = heap_->GetCardTable()->IsDirty(obj);
+        if (!obj_marked) {
+          LOG(WARNING) << "Object '" << PrettyTypeOf(obj) << "' "
+                       << "(" << reinterpret_cast<const void*>(obj) << ") contains references to "
+                       << "the alloc space, but wasn't card marked";
+        }
+      }
+    }
+    break;
+  }
+}
+
+// Scans the header, static field references, and interface pointers
+// of a class object.
+inline void MarkSweep::ScanClass(const Object* obj) {
+#ifndef NDEBUG
+  ++class_count_;
+#endif
+  ScanInstanceFields(obj);
+  ScanStaticFields(obj->AsClass());
+}
+
+// Scans the header of all array objects.  If the array object is
+// specialized to a reference type, scans the array data as well.
+inline void MarkSweep::ScanArray(const Object* obj) {
+#ifndef NDEBUG
+  ++array_count_;
+#endif
+  MarkObject(obj->GetClass());
+  if (obj->IsObjectArray()) {
+    const ObjectArray<Object>* array = obj->AsObjectArray<Object>();
+    for (int32_t i = 0; i < array->GetLength(); ++i) {
+      const Object* element = array->GetWithoutChecks(i);
+      MarkObject(element);
+    }
+  }
+}
+
+// Process the "referent" field in a java.lang.ref.Reference.  If the
+// referent has not yet been marked, put it on the appropriate list in
+// the gcHeap for later processing.
+void MarkSweep::DelayReferenceReferent(Object* obj) {
+  DCHECK(obj != NULL);
+  Class* klass = obj->GetClass();
+  DCHECK(klass != NULL);
+  DCHECK(klass->IsReferenceClass());
+  Object* pending = obj->GetFieldObject<Object*>(heap_->GetReferencePendingNextOffset(), false);
+  Object* referent = heap_->GetReferenceReferent(obj);
+  if (pending == NULL && referent != NULL && !IsMarked(referent)) {
+    Object** list = NULL;
+    if (klass->IsSoftReferenceClass()) {
+      list = &soft_reference_list_;
+    } else if (klass->IsWeakReferenceClass()) {
+      list = &weak_reference_list_;
+    } else if (klass->IsFinalizerReferenceClass()) {
+      list = &finalizer_reference_list_;
+    } else if (klass->IsPhantomReferenceClass()) {
+      list = &phantom_reference_list_;
+    }
+    DCHECK(list != NULL) << PrettyClass(klass) << " " << std::hex << klass->GetAccessFlags();
+    heap_->EnqueuePendingReference(obj, list);
+  }
+}
+
+// Scans the header and field references of a data object.  If the
+// scanned object is a reference subclass, it is scheduled for later
+// processing.
+inline void MarkSweep::ScanOther(const Object* obj) {
+#ifndef NDEBUG
+  ++other_count_;
+#endif
+  ScanInstanceFields(obj);
+  if (obj->GetClass()->IsReferenceClass()) {
+    DelayReferenceReferent(const_cast<Object*>(obj));
+  }
+}
+
+void MarkSweep::ScanRoot(const Object* obj) {
+  ScanObject(obj);
+}
+
+// Scans an object reference.  Determines the type of the reference
+// and dispatches to a specialized scanning routine.
+void MarkSweep::ScanObject(const Object* obj) {
+  DCHECK(obj != NULL);
+  DCHECK(obj->GetClass() != NULL);
+#ifndef NDEBUG
+  if (!IsMarked(obj)) {
+    heap_->DumpSpaces();
+    LOG(FATAL) << "Scanning unmarked object " << reinterpret_cast<const void*>(obj);
+  }
+#endif
+  if (obj->IsClass()) {
+    ScanClass(obj);
+  } else if (obj->IsArrayInstance()) {
+    ScanArray(obj);
+  } else {
+    ScanOther(obj);
+  }
+}
+
+// Scan anything that's on the mark stack.
+void MarkSweep::ProcessMarkStack() {
+  if (kUseMarkStackPrefetch) {
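+    // A small ring buffer delays scanning each popped object by a few iterations so that the
+    // __builtin_prefetch below has time to pull the object into the cache before it is scanned.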
+    const size_t fifo_size = 4;
+    const size_t fifo_mask = fifo_size - 1;
+    const Object* fifo[fifo_size];
+    for (size_t i = 0; i < fifo_size; ++i) {
+      fifo[i] = NULL;
+    }
+    size_t fifo_pos = 0;
+    size_t fifo_count = 0;
+    for (;;) {
+      const Object* obj = fifo[fifo_pos & fifo_mask];
+      if (obj != NULL) {
+        ScanObject(obj);
+        fifo[fifo_pos & fifo_mask] = NULL;
+        --fifo_count;
+      }
+
+      if (!mark_stack_->IsEmpty()) {
+        const Object* obj = mark_stack_->PopBack();
+        DCHECK(obj != NULL);
+        fifo[fifo_pos & fifo_mask] = obj;
+        __builtin_prefetch(obj);
+        fifo_count++;
+      }
+      fifo_pos++;
+
+      if (!fifo_count) {
+        CHECK(mark_stack_->IsEmpty()) << mark_stack_->Size();
+        break;
+      }
+    }
+  } else {
+    while (!mark_stack_->IsEmpty()) {
+      const Object* obj = mark_stack_->PopBack();
+      DCHECK(obj != NULL);
+      ScanObject(obj);
+    }
+  }
+}
+
+// Walks the reference list marking any references subject to the
+// reference clearing policy.  References with a black referent are
+// removed from the list.  References with white referents biased
+// toward saving are blackened and also removed from the list.
+void MarkSweep::PreserveSomeSoftReferences(Object** list) {
+  DCHECK(list != NULL);
+  Object* clear = NULL;
+  size_t counter = 0;
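+  // The (++counter) & 1 test below preserves roughly every other white soft referent, a simple
+  // policy biased toward keeping softly reachable objects alive.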
+
+  DCHECK(mark_stack_->IsEmpty());
+
+  while (*list != NULL) {
+    Object* ref = heap_->DequeuePendingReference(list);
+    Object* referent = heap_->GetReferenceReferent(ref);
+    if (referent == NULL) {
+      // Referent was cleared by the user during marking.
+      continue;
+    }
+    bool is_marked = IsMarked(referent);
+    if (!is_marked && ((++counter) & 1)) {
+      // Referent is white and biased toward saving, mark it.
+      MarkObject(referent);
+      is_marked = true;
+    }
+    if (!is_marked) {
+      // Referent is white, queue it for clearing.
+      heap_->EnqueuePendingReference(ref, &clear);
+    }
+  }
+  *list = clear;
+  // Restart the mark with the newly black references added to the
+  // root set.
+  ProcessMarkStack();
+}
+
+inline bool MarkSweep::IsMarked(const Object* object) const
+    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+  if (object >= immune_begin_ && object < immune_end_) {
+    return true;
+  }
+  DCHECK(current_mark_bitmap_ != NULL);
+  if (current_mark_bitmap_->HasAddress(object)) {
+    return current_mark_bitmap_->Test(object);
+  }
+  return heap_->GetMarkBitmap()->Test(object);
+}
+
+
+// Unlink the reference list clearing references objects with white
+// referents.  Cleared references registered to a reference queue are
+// scheduled for appending by the heap worker thread.
+void MarkSweep::ClearWhiteReferences(Object** list) {
+  DCHECK(list != NULL);
+  while (*list != NULL) {
+    Object* ref = heap_->DequeuePendingReference(list);
+    Object* referent = heap_->GetReferenceReferent(ref);
+    if (referent != NULL && !IsMarked(referent)) {
+      // Referent is white, clear it.
+      heap_->ClearReferenceReferent(ref);
+      if (heap_->IsEnqueuable(ref)) {
+        heap_->EnqueueReference(ref, &cleared_reference_list_);
+      }
+    }
+  }
+  DCHECK(*list == NULL);
+}
+
+// Enqueues finalizer references with white referents.  White
+// referents are blackened, moved to the zombie field, and the
+// referent field is cleared.
+void MarkSweep::EnqueueFinalizerReferences(Object** list) {
+  DCHECK(list != NULL);
+  MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset();
+  bool has_enqueued = false;
+  while (*list != NULL) {
+    Object* ref = heap_->DequeuePendingReference(list);
+    Object* referent = heap_->GetReferenceReferent(ref);
+    if (referent != NULL && !IsMarked(referent)) {
+      MarkObject(referent);
+      // If the referent is non-null, the reference must be enqueuable.
+      DCHECK(heap_->IsEnqueuable(ref));
+      ref->SetFieldObject(zombie_offset, referent, false);
+      heap_->ClearReferenceReferent(ref);
+      heap_->EnqueueReference(ref, &cleared_reference_list_);
+      has_enqueued = true;
+    }
+  }
+  if (has_enqueued) {
+    ProcessMarkStack();
+  }
+  DCHECK(*list == NULL);
+}
+
+// Process reference class instances and schedule finalizations.
+void MarkSweep::ProcessReferences(Object** soft_references, bool clear_soft,
+                                  Object** weak_references,
+                                  Object** finalizer_references,
+                                  Object** phantom_references) {
+  DCHECK(soft_references != NULL);
+  DCHECK(weak_references != NULL);
+  DCHECK(finalizer_references != NULL);
+  DCHECK(phantom_references != NULL);
+
+  // Unless we are in the zygote or required to clear soft references with white referents,
+  // preserve some white referents.
+  if (!clear_soft && !Runtime::Current()->IsZygote()) {
+    PreserveSomeSoftReferences(soft_references);
+  }
+
+  // Clear all remaining soft and weak references with white
+  // referents.
+  ClearWhiteReferences(soft_references);
+  ClearWhiteReferences(weak_references);
+
+  // Preserve all white objects with finalize methods and schedule
+  // them for finalization.
+  EnqueueFinalizerReferences(finalizer_references);
+
+  // Clear all f-reachable soft and weak references with white
+  // referents.
+  ClearWhiteReferences(soft_references);
+  ClearWhiteReferences(weak_references);
+
+  // Clear all phantom references with white referents.
+  ClearWhiteReferences(phantom_references);
+
+  // At this point all reference lists should be empty.
+  DCHECK(*soft_references == NULL);
+  DCHECK(*weak_references == NULL);
+  DCHECK(*finalizer_references == NULL);
+  DCHECK(*phantom_references == NULL);
+}
+
+void MarkSweep::UnBindBitmaps() {
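+  // Undo BindLiveToMarkBitmap: the mark bitmap currently aliases the live bitmap and the
+  // original mark bitmap is stashed in temp_bitmap_, so reinstall the original.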
+  const Spaces& spaces = heap_->GetSpaces();
+  // TODO: C++0x auto
+  for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
+    Space* space = *it;
+    if (space->IsAllocSpace()) {
+      AllocSpace* alloc_space = space->AsAllocSpace();
+      if (alloc_space->temp_bitmap_.get() != NULL) {
+        // At this point, the temp_bitmap holds our old mark bitmap.
+        SpaceBitmap* new_bitmap = alloc_space->temp_bitmap_.release();
+        GetHeap()->GetMarkBitmap()->ReplaceBitmap(alloc_space->mark_bitmap_.get(), new_bitmap);
+        CHECK_EQ(alloc_space->mark_bitmap_.release(), alloc_space->live_bitmap_.get());
+        alloc_space->mark_bitmap_.reset(new_bitmap);
+        DCHECK(alloc_space->temp_bitmap_.get() == NULL);
+      }
+    }
+  }
+}
+
+MarkSweep::~MarkSweep() {
+#ifndef NDEBUG
+  VLOG(heap) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_ << " other=" << other_count_;
+#endif
+  // Ensure that the mark stack is empty.
+  CHECK(mark_stack_->IsEmpty());
+
+  // Clear all of the alloc spaces' mark bitmaps.
+  const Spaces& spaces = heap_->GetSpaces();
+  // TODO: C++0x auto
+  for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
+    ContinuousSpace* space = *it;
+    if (space->GetGcRetentionPolicy() != kGcRetentionPolicyNeverCollect) {
+      space->GetMarkBitmap()->Clear();
+    }
+  }
+  mark_stack_->Reset();
+
+  // Reset the marked large objects.
+  LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
+  large_objects->GetMarkObjects()->Clear();
+}
+
+}  // namespace art
diff --git a/src/gc/mark_sweep.h b/src/gc/mark_sweep.h
new file mode 100644
index 0000000..74b2aa7
--- /dev/null
+++ b/src/gc/mark_sweep.h
@@ -0,0 +1,427 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_MARK_SWEEP_H_
+#define ART_SRC_MARK_SWEEP_H_
+
+#include "atomic_stack.h"
+#include "macros.h"
+#include "heap_bitmap.h"
+#include "object.h"
+#include "offsets.h"
+
+namespace art {
+
+class CheckObjectVisitor;
+class Class;
+class Heap;
+class MarkIfReachesAllocspaceVisitor;
+class ModUnionClearCardVisitor;
+class ModUnionVisitor;
+class ModUnionTableBitmap;
+class Object;
+class TimingLogger;
+
+class MarkSweep {
+ public:
+  explicit MarkSweep(ObjectStack* mark_stack);
+
+  ~MarkSweep();
+
+  // Initializes internal structures.
+  void Init();
+
+  // Find the default mark bitmap.
+  void FindDefaultMarkBitmap();
+
+  // Marks the root set at the start of a garbage collection.
+  void MarkRoots()
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Verify that image roots point to only marked objects within the alloc space.
+  void VerifyImageRoots() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  // Builds a mark stack and recursively mark until it empties.
+  void RecursiveMark(bool partial, TimingLogger& timings)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Copies mark bits from live bitmap of ZygoteSpace to mark bitmap for partial GCs.
+  void CopyMarkBits(ContinuousSpace* space)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  void BindLiveToMarkBitmap(ContinuousSpace* space)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  void UnBindBitmaps()
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  // Builds a mark stack with objects on dirty cards and recursively mark
+  // until it empties.
+  void RecursiveMarkDirtyObjects(bool update_finger)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Recursively marks objects on the specified cards. Updates the finger.
+  void RecursiveMarkCards(CardTable* card_table, const std::vector<byte*>& cards,
+                          TimingLogger& timings)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Remarks the root set after completing the concurrent mark.
+  void ReMarkRoots()
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  Heap* GetHeap() {
+    return heap_;
+  }
+
+  void ProcessReferences(bool clear_soft_references)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    ProcessReferences(&soft_reference_list_, clear_soft_references,
+                      &weak_reference_list_,
+                      &finalizer_reference_list_,
+                      &phantom_reference_list_);
+  }
+
+  // Sweeps unmarked objects to complete the garbage collection.
+  void Sweep(bool partial, bool swap_bitmaps)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  // Sweeps unmarked objects to complete the garbage collection.
+  void SweepLargeObjects(bool swap_bitmaps)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  // Sweep only pointers within an array. WARNING: Trashes objects.
+  void SweepArray(TimingLogger& logger, ObjectStack* allocation_stack_, bool swap_bitmaps)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  Object* GetClearedReferences() {
+    return cleared_reference_list_;
+  }
+
+  // Proxy for external access to ScanObject.
+  void ScanRoot(const Object* obj)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Blackens an object.
+  void ScanObject(const Object* obj)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  void SetFinger(Object* new_finger) {
+    finger_ = new_finger;
+  }
+
+  void DisableFinger() {
+    SetFinger(reinterpret_cast<Object*>(~static_cast<uintptr_t>(0)));
+  }
+
+  size_t GetFreedBytes() const {
+    return freed_bytes_;
+  }
+
+  size_t GetFreedObjects() const {
+    return freed_objects_;
+  }
+
+  // Everything inside the immune range is marked.
+  void SetImmuneRange(Object* begin, Object* end) {
+    immune_begin_ = begin;
+    immune_end_ = end;
+  }
+
+  void SweepSystemWeaks()
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  // Only sweep the system weaks that are in the allocation stack.
+  void SweepSystemWeaksArray(ObjectStack* allocations)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  static bool VerifyIsLiveCallback(const Object* obj, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  void VerifySystemWeaks()
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  // Verify that an object is live, either in a live bitmap or in the allocation stack.
+  void VerifyIsLive(const Object* obj)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  template <typename Visitor>
+  static void VisitObjectReferences(const Object* obj, const Visitor& visitor)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
+                            Locks::mutator_lock_) {
+    DCHECK(obj != NULL);
+    DCHECK(obj->GetClass() != NULL);
+    if (obj->IsClass()) {
+      VisitClassReferences(obj, visitor);
+    } else if (obj->IsArrayInstance()) {
+      VisitArrayReferences(obj, visitor);
+    } else {
+      VisitOtherReferences(obj, visitor);
+    }
+  }
+
+ private:
+  // Returns true if the object has its bit set in the mark bitmap.
+  bool IsMarked(const Object* object) const;
+
+  static bool IsMarkedCallback(const Object* object, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  static bool IsMarkedArrayCallback(const Object* object, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  static void MarkObjectVisitor(const Object* root, void* arg)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  static void ReMarkObjectVisitor(const Object* root, void* arg)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  static void VerifyImageRootVisitor(Object* root, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
+                            Locks::mutator_lock_);
+
+  static void ScanDirtyCardCallback(Object* obj, void* arg)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Marks an object.
+  void MarkObject(const Object* obj)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  // Yuck.
+  void MarkObject0(const Object* obj, bool check_finger)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  static void ScanBitmapCallback(Object* obj, void* finger, void* arg)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  static void SweepCallback(size_t num_ptrs, Object** ptrs, void* arg)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  // Special sweep for zygote that just marks objects / dirties cards.
+  static void ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  void CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  void CheckObject(const Object* obj)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  // Grays references in instance fields.
+  void ScanInstanceFields(const Object* obj)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template <typename Visitor>
+  static void VisitInstanceFieldsReferences(const Object* obj, const Visitor& visitor)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+    DCHECK(obj != NULL);
+    Class* klass = obj->GetClass();
+    DCHECK(klass != NULL);
+    VisitFieldsReferences(obj, klass->GetReferenceInstanceOffsets(), false, visitor);
+  }
+
+  // Blackens a class object.
+  void ScanClass(const Object* obj)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+
+  template <typename Visitor>
+  static void VisitClassReferences(const Object* obj, const Visitor& visitor)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+    VisitInstanceFieldsReferences(obj, visitor);
+    VisitStaticFieldsReferences(obj->AsClass(), visitor);
+  }
+
+  // Grays references in static fields.
+  void ScanStaticFields(const Class* klass)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template <typename Visitor>
+  static void VisitStaticFieldsReferences(const Class* klass, const Visitor& visitor)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+    DCHECK(klass != NULL);
+    VisitFieldsReferences(klass, klass->GetReferenceStaticOffsets(), true, visitor);
+  }
+
+  // Used by ScanInstanceFields and ScanStaticFields
+  void ScanFields(const Object* obj, uint32_t ref_offsets, bool is_static)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template <typename Visitor>
+  static void VisitFieldsReferences(const Object* obj, uint32_t ref_offsets, bool is_static,
+                             const Visitor& visitor)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
+                            Locks::mutator_lock_) {
+    if (ref_offsets != CLASS_WALK_SUPER) {
+      // Found a reference offset bitmap.  Mark the specified offsets.
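+      // Each set bit encodes one reference field; CLZ picks off the highest set bit and
+      // CLASS_OFFSET_FROM_CLZ converts it into the corresponding field offset.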
+      while (ref_offsets != 0) {
+        size_t right_shift = CLZ(ref_offsets);
+        MemberOffset field_offset = CLASS_OFFSET_FROM_CLZ(right_shift);
+        const Object* ref = obj->GetFieldObject<const Object*>(field_offset, false);
+        visitor(obj, ref, field_offset, is_static);
+        ref_offsets &= ~(CLASS_HIGH_BIT >> right_shift);
+      }
+    } else {
+      // There is no reference offset bitmap.  In the non-static case,
+      // walk up the class inheritance hierarchy and find reference
+      // offsets the hard way. In the static case, just consider this
+      // class.
+      for (const Class* klass = is_static ? obj->AsClass() : obj->GetClass();
+           klass != NULL;
+           klass = is_static ? NULL : klass->GetSuperClass()) {
+        size_t num_reference_fields = (is_static
+                                       ? klass->NumReferenceStaticFields()
+                                       : klass->NumReferenceInstanceFields());
+        for (size_t i = 0; i < num_reference_fields; ++i) {
+          Field* field = (is_static
+                          ? klass->GetStaticField(i)
+                          : klass->GetInstanceField(i));
+          MemberOffset field_offset = field->GetOffset();
+          const Object* ref = obj->GetFieldObject<const Object*>(field_offset, false);
+          visitor(obj, ref, field_offset, is_static);
+        }
+      }
+    }
+  }
+
+  // Grays references in an array.
+  void ScanArray(const Object* obj)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template <typename Visitor>
+  static void VisitArrayReferences(const Object* obj, const Visitor& visitor)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+    visitor(obj, obj->GetClass(), Object::ClassOffset(), false);
+    if (obj->IsObjectArray()) {
+      const ObjectArray<Object>* array = obj->AsObjectArray<Object>();
+      for (int32_t i = 0; i < array->GetLength(); ++i) {
+        const Object* element = array->GetWithoutChecks(i);
+        size_t width = sizeof(Object*);
+        visitor(obj, element, MemberOffset(i * width + Array::DataOffset(width).Int32Value()), false);
+      }
+    }
+  }
+
+  void ScanOther(const Object* obj)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template <typename Visitor>
+  static void VisitOtherReferences(const Object* obj, const Visitor& visitor)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+    return VisitInstanceFieldsReferences(obj, visitor);
+  }
+
+  // Blackens objects grayed during a garbage collection.
+  void ScanGrayObjects(bool update_finger)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  // Schedules an unmarked object for reference processing.
+  void DelayReferenceReferent(Object* reference)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  // Recursively blackens objects on the mark stack.
+  void ProcessMarkStack()
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  void EnqueueFinalizerReferences(Object** ref)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  void PreserveSomeSoftReferences(Object** ref)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  void ClearWhiteReferences(Object** list)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  void ProcessReferences(Object** soft_references, bool clear_soft_references,
+                         Object** weak_references,
+                         Object** finalizer_references,
+                         Object** phantom_references)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  void SweepJniWeakGlobals(Heap::IsMarkedTester is_marked, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  // Current space; we check this space first to avoid searching for the appropriate space for an object.
+  SpaceBitmap* current_mark_bitmap_;
+
+  ObjectStack* mark_stack_;
+
+  Heap* heap_;
+
+  Object* finger_;
+
+  // Immune range, every object inside the immune range is assumed to be marked.
+  Object* immune_begin_;
+  Object* immune_end_;
+
+  Object* soft_reference_list_;
+
+  Object* weak_reference_list_;
+
+  Object* finalizer_reference_list_;
+
+  Object* phantom_reference_list_;
+
+  Object* cleared_reference_list_;
+
+  size_t freed_bytes_;
+  size_t freed_objects_;
+
+  size_t class_count_;
+  size_t array_count_;
+  size_t other_count_;
+
+  friend class AddIfReachesAllocSpaceVisitor; // Used by mod-union table.
+  friend class CheckBitmapVisitor;
+  friend class CheckObjectVisitor;
+  friend class CheckReferenceVisitor;
+  friend class InternTableEntryIsUnmarked;
+  friend class MarkIfReachesAllocspaceVisitor;
+  friend class ModUnionCheckReferences;
+  friend class ModUnionClearCardVisitor;
+  friend class ModUnionReferenceVisitor;
+  friend class ModUnionVisitor;
+  friend class ModUnionTableBitmap;
+  friend class ModUnionTableReferenceCache;
+  friend class ModUnionScanImageRootVisitor;
+  friend class ScanBitmapVisitor;
+  friend class ScanImageRootVisitor;
+
+  DISALLOW_COPY_AND_ASSIGN(MarkSweep);
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_MARK_SWEEP_H_
diff --git a/src/gc/mod_union_table.cc b/src/gc/mod_union_table.cc
new file mode 100644
index 0000000..967f795
--- /dev/null
+++ b/src/gc/mod_union_table.cc
@@ -0,0 +1,403 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "heap.h"
+#include "heap_bitmap.h"
+#include "mark_sweep.h"
+#include "mod_union_table.h"
+#include "space.h"
+#include "stl_util.h"
+#include "UniquePtr.h"
+
+namespace art {
+
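+// Sets the corresponding bit in the mod-union bitmap for any object that contains a
+// reference into an alloc space.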
+class MarkIfReachesAllocspaceVisitor {
+ public:
+  explicit MarkIfReachesAllocspaceVisitor(Heap* const heap, SpaceBitmap* bitmap)
+    : heap_(heap),
+      bitmap_(bitmap) {
+  }
+
+  // Extra parameters are required since we use this same visitor signature for checking objects.
+  void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */, bool /* is_static */) const {
+    // TODO: Optimize?
+    // TODO: C++0x auto
+    const Spaces& spaces = heap_->GetSpaces();
+    for (Spaces::const_iterator cur = spaces.begin(); cur != spaces.end(); ++cur) {
+      if ((*cur)->IsAllocSpace() && (*cur)->Contains(ref)) {
+        bitmap_->Set(obj);
+        break;
+      }
+    }
+  }
+
+ private:
+  Heap* const heap_;
+  SpaceBitmap* bitmap_;
+};
+
+class ModUnionVisitor {
+ public:
+  explicit ModUnionVisitor(Heap* const heap, SpaceBitmap* bitmap)
+    : heap_(heap),
+      bitmap_(bitmap) {
+  }
+
+  void operator ()(Object* obj) const
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
+                            Locks::mutator_lock_) {
+    DCHECK(obj != NULL);
+    // We don't have an early exit since we use the visitor pattern; an early exit could
+    // significantly speed this up.
+    MarkIfReachesAllocspaceVisitor visitor(heap_, bitmap_);
+    MarkSweep::VisitObjectReferences(obj, visitor);
+  }
+ private:
+  Heap* const heap_;
+  SpaceBitmap* bitmap_;
+};
+
+class ModUnionClearCardSetVisitor {
+ public:
+  explicit ModUnionClearCardSetVisitor(std::set<byte*>* const cleared_cards)
+    : cleared_cards_(cleared_cards) {
+  }
+
+  inline void operator ()(byte* card) const {
+    cleared_cards_->insert(card);
+  }
+ private:
+  std::set<byte*>* const cleared_cards_;
+};
+
+class ModUnionClearCardVisitor {
+ public:
+  explicit ModUnionClearCardVisitor(std::vector<byte*>* cleared_cards)
+    : cleared_cards_(cleared_cards) {
+  }
+
+  void operator ()(byte* card) const {
+    cleared_cards_->push_back(card);
+  }
+ private:
+  std::vector<byte*>* cleared_cards_;
+};
+
+ModUnionTableBitmap::ModUnionTableBitmap(Heap* heap) : ModUnionTable(heap)  {
+  // Prevent fragmentation of the heap which is caused by resizing of the vector.
+  // TODO: Make a new vector which uses madvise (basically same as a mark stack).
+  cleared_cards_.reserve(32);
+  const Spaces& spaces = heap->GetSpaces();
+  // Create one heap bitmap per image space.
+  // TODO: C++0x auto
+  for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
+    ContinuousSpace* space = *it;
+    if (space->IsImageSpace()) {
+      // The mod-union table is only needed when we have an image space since its purpose is to
+      // cache image roots.
+      UniquePtr<SpaceBitmap> bitmap(SpaceBitmap::Create("mod-union table bitmap", space->Begin(),
+                                                        space->Size()));
+      CHECK(bitmap.get() != NULL) << "Failed to create mod-union bitmap";
+      bitmaps_.Put(space, bitmap.release());
+    }
+  }
+}
+
+ModUnionTableBitmap::~ModUnionTableBitmap() {
+  STLDeleteValues(&bitmaps_);
+}
+
+void ModUnionTableBitmap::ClearCards(ContinuousSpace* space) {
+  CardTable* card_table = heap_->GetCardTable();
+  ModUnionClearCardVisitor visitor(&cleared_cards_);
+  // Clear dirty cards in this image space and update the corresponding mod-union bits.
+  card_table->VisitClear(space->Begin(), space->End(), visitor);
+}
+
+void ModUnionTableBitmap::Update() {
+  CardTable* card_table = heap_->GetCardTable();
+  while (!cleared_cards_.empty()) {
+    byte* card = cleared_cards_.back();
+    cleared_cards_.pop_back();
+
+    uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
+    uintptr_t end = start + CardTable::kCardSize;
+    ContinuousSpace* space = heap_->FindSpaceFromObject(reinterpret_cast<Object*>(start));
+    SpaceBitmap* bitmap = space->GetLiveBitmap();
+
+    // Clear the mod-union bitmap range corresponding to this card so that we don't have any
+    // objects marked which do not reach the alloc space.
+    bitmap->VisitRange(start, end, SpaceBitmap::ClearVisitor(bitmap));
+
+    // At this point we need to update the mod-union bitmap to contain all the objects which reach
+    // the alloc space.
+    ModUnionVisitor visitor(heap_, bitmap);
+    space->GetLiveBitmap()->VisitMarkedRange(start, end, visitor, IdentityFunctor());
+  }
+}
+
+class ModUnionScanImageRootVisitor {
+ public:
+  explicit ModUnionScanImageRootVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {
+  }
+
+  void operator ()(const Object* root) const
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    DCHECK(root != NULL);
+    mark_sweep_->ScanRoot(root);
+  }
+
+ private:
+  MarkSweep* const mark_sweep_;
+};
+
+void ModUnionTableBitmap::MarkReferences(MarkSweep* mark_sweep) {
+  // Some tests have no image space, and therefore no mod-union bitmap.
+  ModUnionScanImageRootVisitor image_root_scanner(mark_sweep);
+  for (BitmapMap::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) {
+    const ContinuousSpace* space = it->first;
+    uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
+    uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
+    it->second->VisitMarkedRange(begin, end, image_root_scanner, IdentityFunctor());
+  }
+}
+
+
+ModUnionTableReferenceCache::ModUnionTableReferenceCache(Heap* heap) : ModUnionTable(heap) {
+
+}
+
+ModUnionTableReferenceCache::~ModUnionTableReferenceCache() {
+
+}
+
+void ModUnionTableReferenceCache::ClearCards(ContinuousSpace* space) {
+  CardTable* card_table = GetHeap()->GetCardTable();
+  ModUnionClearCardSetVisitor visitor(&cleared_cards_);
+  // Clear dirty cards in this space and update the corresponding mod-union bits.
+  card_table->VisitClear(space->Begin(), space->End(), visitor);
+}
+
+class AddToReferenceArrayVisitor {
+ public:
+  explicit AddToReferenceArrayVisitor(
+      ModUnionTableReferenceCache* const mod_union_table,
+        ModUnionTableReferenceCache::ReferenceArray* references)
+    : mod_union_table_(mod_union_table),
+      references_(references) {
+  }
+
+  // Extra parameters are required since we use this same visitor signature for checking objects.
+  void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
+                     bool /* is_static */) const {
+    // Only add the reference if it fits our criteria.
+    if (mod_union_table_->AddReference(obj, ref)) {
+      references_->push_back(ref);
+    }
+  }
+
+ private:
+  ModUnionTableReferenceCache* mod_union_table_;
+  ModUnionTable::ReferenceArray* references_;
+};
+
+class ModUnionReferenceVisitor {
+ public:
+  explicit ModUnionReferenceVisitor(
+        ModUnionTableReferenceCache* const mod_union_table,
+        ModUnionTableReferenceCache::ReferenceArray* references)
+    : mod_union_table_(mod_union_table),
+      references_(references) {
+  }
+
+  void operator ()(Object* obj) const
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
+                            Locks::mutator_lock_) {
+    DCHECK(obj != NULL);
+    // We don't have an early exit since we use the visitor pattern; an early exit could
+    // significantly speed this up.
+    AddToReferenceArrayVisitor visitor(mod_union_table_, references_);
+    MarkSweep::VisitObjectReferences(obj, visitor);
+  }
+ private:
+  ModUnionTableReferenceCache* const mod_union_table_;
+  ModUnionTable::ReferenceArray* references_;
+};
+
+
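+// Verifies that every reference which qualifies for the table is also present in the cached
+// reference set for the card; dumps the spaces and aborts if one is missing.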
+class CheckReferenceVisitor {
+ public:
+  typedef std::set<const Object*> ReferenceSet;
+
+  explicit CheckReferenceVisitor(
+      ModUnionTableReferenceCache* const mod_union_table,
+      const ReferenceSet& references)
+    : mod_union_table_(mod_union_table),
+      references_(references) {
+  }
+
+  // Extra parameters are required since we use this same visitor signature for checking objects.
+  void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
+                     bool /* is_static */) const
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+    Heap* heap = mod_union_table_->GetHeap();
+    if (mod_union_table_->AddReference(obj, ref) && references_.find(ref) == references_.end()) {
+      ContinuousSpace* from_space = heap->FindSpaceFromObject(obj);
+      ContinuousSpace* to_space = heap->FindSpaceFromObject(ref);
+      LOG(INFO) << "Object " << reinterpret_cast<const void*>(obj) << "(" << PrettyTypeOf(obj) << ")"
+                << "References " << reinterpret_cast<const void*>(ref)
+                << "(" << PrettyTypeOf(ref) << ") without being in mod-union table";
+      LOG(INFO) << "FromSpace " << from_space->GetName() << " type " << from_space->GetGcRetentionPolicy();
+      LOG(INFO) << "ToSpace " << to_space->GetName() << " type " << to_space->GetGcRetentionPolicy();
+      mod_union_table_->GetHeap()->DumpSpaces();
+      LOG(FATAL) << "FATAL ERROR";
+    }
+  }
+
+ private:
+  ModUnionTableReferenceCache* const mod_union_table_;
+  const ReferenceSet& references_;
+};
+
+class ModUnionCheckReferences {
+ public:
+  typedef std::set<const Object*> ReferenceSet;
+
+  explicit ModUnionCheckReferences(
+      ModUnionTableReferenceCache* const mod_union_table,
+      const ReferenceSet& references)
+    : mod_union_table_(mod_union_table),
+      references_(references) {
+  }
+
+  void operator ()(Object* obj) const
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+    DCHECK(obj != NULL);
+    CheckReferenceVisitor visitor(mod_union_table_, references_);
+    MarkSweep::VisitObjectReferences(obj, visitor);
+  }
+
+ private:
+  ModUnionTableReferenceCache* const mod_union_table_;
+  const ReferenceSet& references_;
+};
+
+void ModUnionTableReferenceCache::Verify() {
+  // Start by checking that everything in the mod union table is marked.
+  Heap* heap = GetHeap();
+  for (ReferenceMap::const_iterator it = references_.begin(); it != references_.end(); ++it) {
+    for (ReferenceArray::const_iterator it_ref = it->second.begin(); it_ref != it->second.end(); ++it_ref ) {
+      DCHECK(heap->GetLiveBitmap()->Test(*it_ref));
+    }
+  }
+
+  // Check the references of each clean card which is also in the mod union table.
+  for (ReferenceMap::const_iterator it = references_.begin(); it != references_.end(); ++it) {
+    const byte* card = &*it->first;
+    if (*card == CardTable::kCardClean) {
+      std::set<const Object*> reference_set;
+      for (ReferenceArray::const_iterator itr = it->second.begin(); itr != it->second.end(); ++itr) {
+        reference_set.insert(*itr);
+      }
+      ModUnionCheckReferences visitor(this, reference_set);
+      CardTable* card_table = heap->GetCardTable();
+      uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
+      uintptr_t end = start + CardTable::kCardSize;
+      SpaceBitmap* live_bitmap =
+              heap->FindSpaceFromObject(reinterpret_cast<Object*>(start))->GetLiveBitmap();
+      live_bitmap->VisitMarkedRange(start, end, visitor, IdentityFunctor());
+    }
+  }
+}
+
+void ModUnionTableReferenceCache::Update() {
+  Heap* heap = GetHeap();
+  CardTable* card_table = heap->GetCardTable();
+
+  ReferenceArray cards_references;
+  ModUnionReferenceVisitor visitor(this, &cards_references);
+
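+  // For each cleared card, re-scan the objects it covers and replace the cached alloc space
+  // references for that card with the freshly computed set.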
+  for (ClearedCards::iterator it = cleared_cards_.begin(); it != cleared_cards_.end(); ++it) {
+    byte* card = *it;
+    // Clear and re-compute alloc space references associated with this card.
+    cards_references.clear();
+    uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
+    uintptr_t end = start + CardTable::kCardSize;
+    SpaceBitmap* live_bitmap =
+        heap->FindSpaceFromObject(reinterpret_cast<Object*>(start))->GetLiveBitmap();
+    live_bitmap->VisitMarkedRange(start, end, visitor, IdentityFunctor());
+
+    // Update the corresponding references for the card.
+    // TODO: C++0x auto
+    ReferenceMap::iterator found = references_.find(card);
+    if (found == references_.end()) {
+      if (cards_references.empty()) {
+        // No reason to add empty array.
+        continue;
+      }
+      references_.Put(card, cards_references);
+    } else {
+      found->second = cards_references;
+    }
+  }
+  cleared_cards_.clear();
+}
+
+void ModUnionTableReferenceCache::MarkReferences(MarkSweep* mark_sweep) {
+  // TODO: C++0x auto
+  size_t count = 0;
+  for (ReferenceMap::const_iterator it = references_.begin(); it != references_.end(); ++it) {
+    for (ReferenceArray::const_iterator it_ref = it->second.begin(); it_ref != it->second.end(); ++it_ref ) {
+      mark_sweep->MarkObject(*it_ref);
+      ++count;
+    }
+  }
+  if (VLOG_IS_ON(gc)) {
+    VLOG(gc) << "Marked " << count << " references in mod union table";
+  }
+}
+
+ModUnionTableCardCache::ModUnionTableCardCache(Heap* heap) : ModUnionTable(heap) {
+
+}
+
+ModUnionTableCardCache::~ModUnionTableCardCache() {
+
+}
+
+void ModUnionTableCardCache::ClearCards(ContinuousSpace* space) {
+  CardTable* card_table = GetHeap()->GetCardTable();
+  ModUnionClearCardSetVisitor visitor(&cleared_cards_);
+  // Clear dirty cards in the this space and update the corresponding mod-union bits.
+  card_table->VisitClear(space->Begin(), space->End(), visitor);
+}
+
+// Mark all references to the alloc space(s).
+void ModUnionTableCardCache::MarkReferences(MarkSweep* mark_sweep) {
+  CardTable* card_table = heap_->GetCardTable();
+  ModUnionScanImageRootVisitor visitor(mark_sweep);
+  for (ClearedCards::const_iterator it = cleared_cards_.begin(); it != cleared_cards_.end(); ++it) {
+    byte* card = *it;
+    uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
+    uintptr_t end = start + CardTable::kCardSize;
+    SpaceBitmap* live_bitmap =
+        heap_->FindSpaceFromObject(reinterpret_cast<Object*>(start))->GetLiveBitmap();
+    live_bitmap->VisitMarkedRange(start, end, visitor, IdentityFunctor());
+  }
+}
+
+}  // namespace art
diff --git a/src/gc/mod_union_table.h b/src/gc/mod_union_table.h
new file mode 100644
index 0000000..5e4733a
--- /dev/null
+++ b/src/gc/mod_union_table.h
@@ -0,0 +1,198 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_MOD_UNION_TABLE_H_
+#define ART_SRC_MOD_UNION_TABLE_H_
+
+#include "heap.h"
+#include "safe_map.h"
+#include "space.h"
+
+namespace art {
+
+class Heap;
+class HeapBitmap;
+class Space;
+
+// Base class for mod-union tables, which cache the references found on dirty cards of spaces
+// that are not collected so those spaces do not need to be re-scanned during every GC.
+class ModUnionTable {
+ public:
+  typedef std::vector<const Object*> ReferenceArray;
+  typedef std::set<byte*> ClearedCards;
+
+  ModUnionTable(Heap* heap) : heap_(heap) {
+
+  }
+
+  virtual ~ModUnionTable() {
+
+  }
+
+  // Clear cards which map to a memory range of a space.
+  virtual void ClearCards(ContinuousSpace* space) = 0;
+
+  // Update the mod-union table.
+  virtual void Update() = 0;
+
+  // Mark all references which are stored in the mod union table.
+  virtual void MarkReferences(MarkSweep* mark_sweep) = 0;
+
+  // Verification: sanity-checks that we don't have clean cards which conflict with our cached data
+  // for said cards. Exclusive lock is required since verify sometimes uses
+  // SpaceBitmap::VisitMarkedRange and VisitMarkedRange can't know if the callback will modify the
+  // bitmap or not.
+  virtual void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) = 0;
+
+  Heap* GetHeap() {
+    return heap_;
+  }
+
+ protected:
+  Heap* heap_;
+};
+
+// Bitmap implementation.
+// DEPRECATED: performs strictly worse than simply caching which cards were dirty.
+class ModUnionTableBitmap : public ModUnionTable {
+ public:
+  ModUnionTableBitmap(Heap* heap);
+  virtual ~ModUnionTableBitmap();
+
+  // Clear space cards.
+  void ClearCards(ContinuousSpace* space);
+
+  // Update table based on cleared cards.
+  void Update()
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Mark all references to the alloc space(s).
+  void MarkReferences(MarkSweep* mark_sweep) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ protected:
+  // Cleared card array, used to update the mod-union table.
+  std::vector<byte*> cleared_cards_;
+
+  // One bitmap per image space.
+  // TODO: Add support for Zygote spaces?
+  typedef SafeMap<ContinuousSpace*, SpaceBitmap*> BitmapMap;
+  BitmapMap bitmaps_;
+};
+
+// Reference caching implementation. Caches references pointing to alloc space(s) for each card.
+class ModUnionTableReferenceCache : public ModUnionTable {
+ public:
+  typedef SafeMap<const byte*, ReferenceArray > ReferenceMap;
+
+  ModUnionTableReferenceCache(Heap* heap);
+  virtual ~ModUnionTableReferenceCache();
+
+  // Clear and store cards for a space.
+  void ClearCards(ContinuousSpace* space);
+
+  // Update table based on cleared cards.
+  void Update()
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Mark all references to the alloc space(s).
+  void MarkReferences(MarkSweep* mark_sweep) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  // Exclusive lock is required since verify uses SpaceBitmap::VisitMarkedRange and
+  // VisitMarkedRange can't know if the callback will modify the bitmap or not.
+  void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  // Function that tells whether or not to add a reference to the table.
+  virtual bool AddReference(const Object* obj, const Object* ref) = 0;
+
+ protected:
+  // Cleared card array, used to update the mod-union table.
+  ClearedCards cleared_cards_;
+
+  // Maps from dirty cards to their corresponding alloc space references.
+  ReferenceMap references_;
+};
+
+// Card caching implementation. Keeps track only of which cards were cleared.
+class ModUnionTableCardCache : public ModUnionTable {
+ public:
+  typedef SafeMap<const byte*, ReferenceArray > ReferenceMap;
+
+  ModUnionTableCardCache(Heap* heap);
+  virtual ~ModUnionTableCardCache();
+
+  // Clear and store cards for a space.
+  void ClearCards(ContinuousSpace* space);
+
+  // Nothing to update.
+  void Update() {}
+
+  // Mark all references to the alloc space(s).
+  void MarkReferences(MarkSweep* mark_sweep)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Nothing to verify.
+  void Verify() {}
+
+ protected:
+  // Cleared card array, used to update the mod-union table.
+  ClearedCards cleared_cards_;
+};
+
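+// Caches references that point into alloc spaces (which, per the class name, includes the
+// zygote space); references not contained in any continuous space are assumed to point at
+// large objects and are cached as well.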
+template <typename Implementation>
+class ModUnionTableToZygoteAllocspace : public Implementation {
+ public:
+  ModUnionTableToZygoteAllocspace(Heap* heap) : Implementation(heap) {
+  }
+
+  bool AddReference(const Object* /* obj */, const Object* ref) {
+    const Spaces& spaces = Implementation::GetHeap()->GetSpaces();
+    for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
+      if ((*it)->Contains(ref)) {
+        return (*it)->IsAllocSpace();
+      }
+    }
+    // Assume it points to a large object.
+    // TODO: Check.
+    return true;
+  }
+};
+
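+// Caches only references into spaces whose retention policy is kGcRetentionPolicyAlwaysCollect;
+// a non-null reference that is not in any space is a fatal error.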
+template <typename Implementation>
+class ModUnionTableToAllocspace : public Implementation {
+ public:
+  ModUnionTableToAllocspace(Heap* heap) : Implementation(heap) {
+  }
+
+  bool AddReference(const Object* /* obj */, const Object* ref) {
+    const Spaces& spaces = Implementation::GetHeap()->GetSpaces();
+    for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
+      if ((*it)->Contains(ref)) {
+        return (*it)->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect;
+      }
+    }
+    if (ref != NULL) {
+      Implementation::GetHeap()->DumpSpaces();
+      LOG(FATAL) << "Reference " << ref << " not in any space!";
+    }
+    return false;
+  }
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_MOD_UNION_TABLE_H_
diff --git a/src/gc/space.cc b/src/gc/space.cc
new file mode 100644
index 0000000..9c8819b
--- /dev/null
+++ b/src/gc/space.cc
@@ -0,0 +1,770 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "space.h"
+
+#include "UniquePtr.h"
+#include "dlmalloc.h"
+#include "file.h"
+#include "image.h"
+#include "logging.h"
+#include "os.h"
+#include "space_bitmap.h"
+#include "stl_util.h"
+#include "utils.h"
+
+namespace art {
+
+#ifndef NDEBUG
+static const bool kDebugSpaces = true;
+#else
+static const bool kDebugSpaces = false;
+#endif
+// Magic padding value that we use to check for buffer overruns.
+static const word kPaddingValue = 0xBAC0BAC0;
+
+// TODO: Remove define macro
+#define CHECK_MEMORY_CALL(call, args, what) \
+  do { \
+    int rc = call args; \
+    if (UNLIKELY(rc != 0)) { \
+      errno = rc; \
+      PLOG(FATAL) << # call << " failed for " << what; \
+    } \
+  } while (false)
+
+Space::Space(const std::string& name, GcRetentionPolicy gc_retention_policy)
+    : name_(name),
+      gc_retention_policy_(gc_retention_policy) {
+
+}
+
+ContinuousSpace::ContinuousSpace(const std::string& name, byte* begin, byte* end,
+                                 GcRetentionPolicy gc_retention_policy)
+    : Space(name, gc_retention_policy),
+      begin_(begin),
+      end_(end) {
+
+}
+
+MemMapSpace::MemMapSpace(const std::string& name, MemMap* mem_map, size_t initial_size,
+                         GcRetentionPolicy gc_retention_policy)
+    : ContinuousSpace(name, mem_map->Begin(), mem_map->Begin() + initial_size, gc_retention_policy),
+      mem_map_(mem_map) {
+
+}
+
+size_t AllocSpace::bitmap_index_ = 0;
+
+AllocSpace::AllocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin,
+                       byte* end, size_t growth_limit)
+    : MemMapSpace(name, mem_map, end - begin, kGcRetentionPolicyAlwaysCollect),
+      num_bytes_allocated_(0), num_objects_allocated_(0),
+      lock_("allocation space lock", kAllocSpaceLock), mspace_(mspace),
+      growth_limit_(growth_limit) {
+  CHECK(mspace != NULL);
+
+  size_t bitmap_index = bitmap_index_++;
+
+  static const uintptr_t kGcCardSize = static_cast<uintptr_t>(CardTable::kCardSize);
+  CHECK(reinterpret_cast<uintptr_t>(mem_map->Begin()) % kGcCardSize == 0);
+  CHECK(reinterpret_cast<uintptr_t>(mem_map->End()) % kGcCardSize == 0);
+  live_bitmap_.reset(SpaceBitmap::Create(
+      StringPrintf("allocspace-%s-live-bitmap-%d", name.c_str(), static_cast<int>(bitmap_index)),
+      Begin(), Capacity()));
+  DCHECK(live_bitmap_.get() != NULL) << "could not create allocspace live bitmap #" << bitmap_index;
+
+  mark_bitmap_.reset(SpaceBitmap::Create(
+      StringPrintf("allocspace-%s-mark-bitmap-%d", name.c_str(), static_cast<int>(bitmap_index)),
+      Begin(), Capacity()));
+  DCHECK(live_bitmap_.get() != NULL) << "could not create allocspace mark bitmap #" << bitmap_index;
+}
+
+AllocSpace* AllocSpace::Create(const std::string& name, size_t initial_size, size_t growth_limit,
+                               size_t capacity, byte* requested_begin) {
+  // Memory we promise to dlmalloc before it asks for morecore.
+  // Note: making this value large means that large allocations are unlikely to succeed as dlmalloc
+  // will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
+  // size of the large allocation) will be greater than the footprint limit.
+  size_t starting_size = kPageSize;
+  uint64_t start_time = 0;
+  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
+    start_time = NanoTime();
+    VLOG(startup) << "Space::CreateAllocSpace entering " << name
+                  << " initial_size=" << PrettySize(initial_size)
+                  << " growth_limit=" << PrettySize(growth_limit)
+                  << " capacity=" << PrettySize(capacity)
+                  << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
+  }
+
+  // Sanity check arguments
+  if (starting_size > initial_size) {
+    initial_size = starting_size;
+  }
+  if (initial_size > growth_limit) {
+    LOG(ERROR) << "Failed to create alloc space (" << name << ") where the initial size ("
+        << PrettySize(initial_size) << ") is larger than its capacity ("
+        << PrettySize(growth_limit) << ")";
+    return NULL;
+  }
+  if (growth_limit > capacity) {
+    LOG(ERROR) << "Failed to create alloc space (" << name << ") where the growth limit capacity ("
+        << PrettySize(growth_limit) << ") is larger than the capacity ("
+        << PrettySize(capacity) << ")";
+    return NULL;
+  }
+
+  // Page align growth limit and capacity which will be used to manage mmapped storage
+  growth_limit = RoundUp(growth_limit, kPageSize);
+  capacity = RoundUp(capacity, kPageSize);
+
+  UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin,
+                                                 capacity, PROT_READ | PROT_WRITE));
+  if (mem_map.get() == NULL) {
+    LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
+        << PrettySize(capacity);
+    return NULL;
+  }
+
+  void* mspace = AllocSpace::CreateMallocSpace(mem_map->Begin(), starting_size, initial_size);
+  if (mspace == NULL) {
+    LOG(ERROR) << "Failed to initialize mspace for alloc space (" << name << ")";
+    return NULL;
+  }
+
+  // Protect memory beyond the initial size.
+  byte* end = mem_map->Begin() + starting_size;
+  if (capacity - initial_size > 0) {
+    CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), name);
+  }
+
+  // Everything is set so record in immutable structure and leave
+  MemMap* mem_map_ptr = mem_map.release();
+  AllocSpace* space = new AllocSpace(name, mem_map_ptr, mspace, mem_map_ptr->Begin(), end,
+                                     growth_limit);
+  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
+    LOG(INFO) << "Space::CreateAllocSpace exiting (" << PrettyDuration(NanoTime() - start_time)
+        << " ) " << *space;
+  }
+  return space;
+}
+
+void* AllocSpace::CreateMallocSpace(void* begin, size_t morecore_start, size_t initial_size) {
+  // clear errno to allow PLOG on error
+  errno = 0;
+  // create mspace using our backing storage starting at begin and with a footprint of
+  // morecore_start. Don't use an internal dlmalloc lock (as we already hold heap lock). When
+  // morecore_start bytes of memory are exhausted, morecore will be called.
+  void* msp = create_mspace_with_base(begin, morecore_start, false /*locked*/);
+  if (msp != NULL) {
+    // Do not allow morecore requests to succeed beyond the initial size of the heap
+    mspace_set_footprint_limit(msp, initial_size);
+  } else {
+    PLOG(ERROR) << "create_mspace_with_base failed";
+  }
+  return msp;
+}
+
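+// Exchange the live and mark bitmaps in place; ownership of the underlying storage is swapped
+// rather than copying the bitmap contents.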
+void AllocSpace::SwapBitmaps() {
+  SpaceBitmap* temp_live_bitmap = live_bitmap_.release();
+  live_bitmap_.reset(mark_bitmap_.release());
+  mark_bitmap_.reset(temp_live_bitmap);
+  // Swap names to get more descriptive diagnostics.
+  std::string temp_name = live_bitmap_->GetName();
+  live_bitmap_->SetName(mark_bitmap_->GetName());
+  mark_bitmap_->SetName(temp_name);
+}
+
+Object* AllocSpace::AllocWithoutGrowthLocked(size_t num_bytes) {
+  if (kDebugSpaces) {
+    num_bytes += sizeof(word);
+  }
+
+  Object* result = reinterpret_cast<Object*>(mspace_calloc(mspace_, 1, num_bytes));
+  if (kDebugSpaces && result != NULL) {
+    CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
+        << ") not in bounds of allocation space " << *this;
+    // Write a magic pattern into the padding word at the end of the allocation so that Free()
+    // can detect buffer overruns.
+    *reinterpret_cast<word*>(reinterpret_cast<byte*>(result) + AllocationSize(result)
+        - sizeof(word) - kChunkOverhead) = kPaddingValue;
+  }
+  num_bytes_allocated_ += AllocationSize(result);
+  ++num_objects_allocated_;
+  return result;
+}
+
+Object* AllocSpace::AllocWithoutGrowth(Thread* self, size_t num_bytes) {
+  MutexLock mu(self, lock_);
+  return AllocWithoutGrowthLocked(num_bytes);
+}
+
+Object* AllocSpace::AllocWithGrowth(Thread* self, size_t num_bytes) {
+  MutexLock mu(self, lock_);
+  // Grow as much as possible within the mspace.
+  size_t max_allowed = Capacity();
+  mspace_set_footprint_limit(mspace_, max_allowed);
+  // Try the allocation.
+  void* ptr = AllocWithoutGrowthLocked(num_bytes);
+  // Shrink back down as small as possible.
+  size_t footprint = mspace_footprint(mspace_);
+  mspace_set_footprint_limit(mspace_, footprint);
+  // Return the new allocation or NULL.
+  Object* result = reinterpret_cast<Object*>(ptr);
+  CHECK(!kDebugSpaces || result == NULL || Contains(result));
+  return result;
+}
+
+void AllocSpace::SetGrowthLimit(size_t growth_limit) {
+  growth_limit = RoundUp(growth_limit, kPageSize);
+  growth_limit_ = growth_limit;
+  if (Size() > growth_limit_) {
+    end_ = begin_ + growth_limit;
+  }
+}
+
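+// Shrinks this space down to its current footprint (which becomes the zygote space) and
+// creates a new alloc space out of the remaining reserved memory, which is returned.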
+AllocSpace* AllocSpace::CreateZygoteSpace() {
+  end_ = reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(end_), kPageSize));
+  DCHECK(IsAligned<CardTable::kCardSize>(begin_));
+  DCHECK(IsAligned<CardTable::kCardSize>(end_));
+  DCHECK(IsAligned<kPageSize>(begin_));
+  DCHECK(IsAligned<kPageSize>(end_));
+  size_t size = RoundUp(Size(), kPageSize);
+  // Trim the heap so that we minimize the size of the Zygote space.
+  Trim();
+  // Trim our mem-map to free unused pages.
+  GetMemMap()->UnMapAtEnd(end_);
+  // TODO: Not hardcode these in?
+  const size_t starting_size = kPageSize;
+  const size_t initial_size = 2 * MB;
+  // Remaining size is for the new alloc space.
+  const size_t growth_limit = growth_limit_ - size;
+  const size_t capacity = Capacity() - size;
+  VLOG(heap) << "Begin " << reinterpret_cast<const void*>(begin_) << "\n"
+             << "End " << reinterpret_cast<const void*>(end_) << "\n"
+             << "Size " << size << "\n"
+             << "GrowthLimit " << growth_limit_ << "\n"
+             << "Capacity " << Capacity();
+  SetGrowthLimit(RoundUp(size, kPageSize));
+  SetFootprintLimit(RoundUp(size, kPageSize));
+  // FIXME: Do we need reference counted pointers here?
+  // Make the two spaces share the same mark bitmaps since the bitmaps span both of the spaces.
+  VLOG(heap) << "Creating new AllocSpace: ";
+  VLOG(heap) << "Size " << GetMemMap()->Size();
+  VLOG(heap) << "GrowthLimit " << PrettySize(growth_limit);
+  VLOG(heap) << "Capacity " << PrettySize(capacity);
+  UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(GetName().c_str(), End(), capacity, PROT_READ | PROT_WRITE));
+  void* mspace = CreateMallocSpace(end_, starting_size, initial_size);
+  // Protect memory beyond the initial size.
+  byte* end = mem_map->Begin() + starting_size;
+  if (capacity - initial_size > 0) {
+    CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), name_.c_str());
+  }
+  AllocSpace* alloc_space = new AllocSpace(name_, mem_map.release(), mspace, end_, end, growth_limit);
+  live_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
+  CHECK_EQ(live_bitmap_->HeapLimit(), reinterpret_cast<uintptr_t>(End()));
+  mark_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
+  CHECK_EQ(mark_bitmap_->HeapLimit(), reinterpret_cast<uintptr_t>(End()));
+  name_ += "-zygote-transformed";
+  VLOG(heap) << "zygote space creation done";
+  return alloc_space;
+}
+
+void AllocSpace::Free(Thread* self, Object* ptr) {
+  MutexLock mu(self, lock_);
+  if (kDebugSpaces) {
+    CHECK(ptr != NULL);
+    CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
+    CHECK_EQ(
+        *reinterpret_cast<word*>(reinterpret_cast<byte*>(ptr) + AllocationSize(ptr) -
+            sizeof(word) - kChunkOverhead), kPaddingValue);
+  }
+  num_bytes_allocated_ -= AllocationSize(ptr);
+  --num_objects_allocated_;
+  mspace_free(mspace_, ptr);
+}
+
+void AllocSpace::FreeList(Thread* self, size_t num_ptrs, Object** ptrs) {
+  MutexLock mu(self, lock_);
+  if (kDebugSpaces) {
+    CHECK(ptrs != NULL);
+    size_t num_broken_ptrs = 0;
+    for (size_t i = 0; i < num_ptrs; i++) {
+      if (!Contains(ptrs[i])) {
+        num_broken_ptrs++;
+        LOG(ERROR) << "FreeList[" << i << "] (" << ptrs[i] << ") not in bounds of heap " << *this;
+      } else {
+        size_t size = mspace_usable_size(ptrs[i]);
+        memset(ptrs[i], 0xEF, size);
+      }
+    }
+    CHECK_EQ(num_broken_ptrs, 0u);
+  }
+  for (size_t i = 0; i < num_ptrs; i++) {
+    num_bytes_allocated_ -= AllocationSize(ptrs[i]);
+  }
+  num_objects_allocated_ -= num_ptrs;
+  mspace_bulk_free(mspace_, reinterpret_cast<void**>(ptrs), num_ptrs);
+}
+
+// Callback from dlmalloc when it needs to increase the footprint
+extern "C" void* art_heap_morecore(void* mspace, intptr_t increment) {
+  Heap* heap = Runtime::Current()->GetHeap();
+  DCHECK_EQ(heap->GetAllocSpace()->GetMspace(), mspace);
+  return heap->GetAllocSpace()->MoreCore(increment);
+}
+
+void* AllocSpace::MoreCore(intptr_t increment) {
+  lock_.AssertHeld(Thread::Current());
+  byte* original_end = end_;
+  if (increment != 0) {
+    VLOG(heap) << "AllocSpace::MoreCore " << PrettySize(increment);
+    byte* new_end = original_end + increment;
+    if (increment > 0) {
+      if (kDebugSpaces) {
+        // Should never be asked to increase the allocation beyond the capacity of the space.
+        // Enforced by mspace_set_footprint_limit.
+        CHECK_LE(new_end, Begin() + Capacity());
+      }
+      CHECK_MEMORY_CALL(mprotect, (original_end, increment, PROT_READ | PROT_WRITE), GetName());
+    } else {
+      if (kDebugSpaces) {
+        // Should never be asked for a negative footprint (i.e. before begin).
+        CHECK_GT(original_end + increment, Begin());
+      }
+      // Advise we don't need the pages and protect them
+      // TODO: by removing permissions to the pages we may be causing TLB shoot-down which can be
+      // expensive (note the same isn't true for giving permissions to a page as the protected
+      // page shouldn't be in a TLB). We should investigate performance impact of just
+      // ignoring the memory protection change here and in Space::CreateAllocSpace. It's
+      // likely just a useful debug feature.
+      size_t size = -increment;
+      CHECK_MEMORY_CALL(madvise, (new_end, size, MADV_DONTNEED), GetName());
+      CHECK_MEMORY_CALL(mprotect, (new_end, size, PROT_NONE), GetName());
+    }
+    // Update end_
+    end_ = new_end;
+  }
+  return original_end;
+}
+
+size_t AllocSpace::AllocationSize(const Object* obj) {
+  return mspace_usable_size(const_cast<void*>(reinterpret_cast<const void*>(obj))) +
+      kChunkOverhead;
+}
+
+void MspaceMadviseCallback(void* start, void* end, size_t used_bytes, void* /* arg */) {
+  // Is this chunk in use?
+  if (used_bytes != 0) {
+    return;
+  }
+  // Do we have any whole pages to give back?
+  start = reinterpret_cast<void*>(RoundUp(reinterpret_cast<uintptr_t>(start), kPageSize));
+  end = reinterpret_cast<void*>(RoundDown(reinterpret_cast<uintptr_t>(end), kPageSize));
+  if (end > start) {
+    size_t length = reinterpret_cast<byte*>(end) - reinterpret_cast<byte*>(start);
+    CHECK_MEMORY_CALL(madvise, (start, length, MADV_DONTNEED), "trim");
+  }
+}
+
+void AllocSpace::Trim() {
+  MutexLock mu(Thread::Current(), lock_);
+  // Trim to release memory at the end of the space.
+  mspace_trim(mspace_, 0);
+  // Visit space looking for page-sized holes to advise the kernel we don't need.
+  mspace_inspect_all(mspace_, MspaceMadviseCallback, NULL);
+}
+
+void AllocSpace::Walk(void(*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
+                      void* arg) {
+  MutexLock mu(Thread::Current(), lock_);
+  mspace_inspect_all(mspace_, callback, arg);
+  callback(NULL, NULL, 0, arg);  // Indicate end of a space.
+}
+
+size_t AllocSpace::GetFootprintLimit() {
+  MutexLock mu(Thread::Current(), lock_);
+  return mspace_footprint_limit(mspace_);
+}
+
+void AllocSpace::SetFootprintLimit(size_t new_size) {
+  MutexLock mu(Thread::Current(), lock_);
+  VLOG(heap) << "AllocSpace::SetFootprintLimit " << PrettySize(new_size);
+  // Compare against the actual footprint, rather than the Size(), because the heap may not have
+  // grown all the way to the allowed size yet.
+  size_t current_space_size = mspace_footprint(mspace_);
+  if (new_size < current_space_size) {
+    // Don't let the space grow any more.
+    new_size = current_space_size;
+  }
+  mspace_set_footprint_limit(mspace_, new_size);
+}
+
+size_t ImageSpace::bitmap_index_ = 0;
+
+ImageSpace::ImageSpace(const std::string& name, MemMap* mem_map)
+    : MemMapSpace(name, mem_map, mem_map->Size(), kGcRetentionPolicyNeverCollect) {
+  const size_t bitmap_index = bitmap_index_++;
+  live_bitmap_.reset(SpaceBitmap::Create(
+      StringPrintf("imagespace-%s-live-bitmap-%d", name.c_str(), static_cast<int>(bitmap_index)),
+      Begin(), Capacity()));
+  DCHECK(live_bitmap_.get() != NULL) << "could not create imagespace live bitmap #" << bitmap_index;
+}
+
+ImageSpace* ImageSpace::Create(const std::string& image_file_name) {
+  CHECK(!image_file_name.empty());
+
+  uint64_t start_time = 0;
+  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
+    start_time = NanoTime();
+    LOG(INFO) << "Space::CreateImageSpace entering" << " image_file_name=" << image_file_name;
+  }
+
+  UniquePtr<File> file(OS::OpenFile(image_file_name.c_str(), false));
+  if (file.get() == NULL) {
+    LOG(ERROR) << "Failed to open " << image_file_name;
+    return NULL;
+  }
+  ImageHeader image_header;
+  bool success = file->ReadFully(&image_header, sizeof(image_header));
+  if (!success || !image_header.IsValid()) {
+    LOG(ERROR) << "Invalid image header " << image_file_name;
+    return NULL;
+  }
+  UniquePtr<MemMap> map(MemMap::MapFileAtAddress(image_header.GetImageBegin(),
+                                                 file->Length(),
+                                                 // TODO: selectively PROT_EXEC stubs
+                                                 PROT_READ | PROT_WRITE | PROT_EXEC,
+                                                 MAP_PRIVATE | MAP_FIXED,
+                                                 file->Fd(),
+                                                 0));
+  if (map.get() == NULL) {
+    LOG(ERROR) << "Failed to map " << image_file_name;
+    return NULL;
+  }
+  CHECK_EQ(image_header.GetImageBegin(), map->Begin());
+  DCHECK_EQ(0, memcmp(&image_header, map->Begin(), sizeof(ImageHeader)));
+
+  Runtime* runtime = Runtime::Current();
+  Object* jni_stub_array = image_header.GetImageRoot(ImageHeader::kJniStubArray);
+  runtime->SetJniDlsymLookupStub(down_cast<ByteArray*>(jni_stub_array));
+
+  Object* ame_stub_array = image_header.GetImageRoot(ImageHeader::kAbstractMethodErrorStubArray);
+  runtime->SetAbstractMethodErrorStubArray(down_cast<ByteArray*>(ame_stub_array));
+
+  Object* resolution_stub_array =
+      image_header.GetImageRoot(ImageHeader::kStaticResolutionStubArray);
+  runtime->SetResolutionStubArray(
+      down_cast<ByteArray*>(resolution_stub_array), Runtime::kStaticMethod);
+  resolution_stub_array = image_header.GetImageRoot(ImageHeader::kUnknownMethodResolutionStubArray);
+  runtime->SetResolutionStubArray(
+      down_cast<ByteArray*>(resolution_stub_array), Runtime::kUnknownMethod);
+
+  Object* resolution_method = image_header.GetImageRoot(ImageHeader::kResolutionMethod);
+  runtime->SetResolutionMethod(down_cast<AbstractMethod*>(resolution_method));
+
+  Object* callee_save_method = image_header.GetImageRoot(ImageHeader::kCalleeSaveMethod);
+  runtime->SetCalleeSaveMethod(down_cast<AbstractMethod*>(callee_save_method), Runtime::kSaveAll);
+  callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsOnlySaveMethod);
+  runtime->SetCalleeSaveMethod(down_cast<AbstractMethod*>(callee_save_method), Runtime::kRefsOnly);
+  callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsAndArgsSaveMethod);
+  runtime->SetCalleeSaveMethod(down_cast<AbstractMethod*>(callee_save_method), Runtime::kRefsAndArgs);
+
+  ImageSpace* space = new ImageSpace(image_file_name, map.release());
+  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
+    LOG(INFO) << "Space::CreateImageSpace exiting (" << PrettyDuration(NanoTime() - start_time)
+        << ") " << *space;
+  }
+  return space;
+}
+
+void ImageSpace::RecordImageAllocations(SpaceBitmap* live_bitmap) const {
+  uint64_t start_time = 0;
+  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
+    LOG(INFO) << "ImageSpace::RecordImageAllocations entering";
+    start_time = NanoTime();
+  }
+  DCHECK(!Runtime::Current()->IsStarted());
+  CHECK(live_bitmap != NULL);
+  byte* current = Begin() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
+  byte* end = End();
+  while (current < end) {
+    DCHECK_ALIGNED(current, kObjectAlignment);
+    const Object* obj = reinterpret_cast<const Object*>(current);
+    live_bitmap->Set(obj);
+    current += RoundUp(obj->SizeOf(), kObjectAlignment);
+  }
+  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
+    LOG(INFO) << "ImageSpace::RecordImageAllocations exiting ("
+        << PrettyDuration(NanoTime() - start_time) << ")";
+  }
+}
+
+std::ostream& operator<<(std::ostream& os, const Space& space) {
+  space.Dump(os);
+  return os;
+}
+
+void AllocSpace::Dump(std::ostream& os) const {
+  os << GetType()
+      << "[begin=" << reinterpret_cast<void*>(Begin())
+      << ",end=" << reinterpret_cast<void*>(End())
+      << ",size=" << PrettySize(Size()) << ",capacity=" << PrettySize(Capacity())
+      << ",name=\"" << GetName() << "\"]";
+}
+
+void ImageSpace::Dump(std::ostream& os) const {
+  os << GetType()
+      << "[begin=" << reinterpret_cast<void*>(Begin())
+      << ",end=" << reinterpret_cast<void*>(End())
+      << ",size=" << PrettySize(Size())
+      << ",name=\"" << GetName() << "\"]";
+}
+
+void LargeObjectSpace::SwapBitmaps() {
+  SpaceSetMap* temp_live_objects = live_objects_.release();
+  live_objects_.reset(mark_objects_.release());
+  mark_objects_.reset(temp_live_objects);
+  // Swap names to get more descriptive diagnostics.
+  std::string temp_name = live_objects_->GetName();
+  live_objects_->SetName(mark_objects_->GetName());
+  mark_objects_->SetName(temp_name);
+}
+
+DiscontinuousSpace::DiscontinuousSpace(const std::string& name,
+                                       GcRetentionPolicy gc_retention_policy)
+    : Space(name, gc_retention_policy) {
+}
+
+LargeObjectSpace::LargeObjectSpace(const std::string& name)
+    : DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
+      num_bytes_allocated_(0),
+      num_objects_allocated_(0) {
+  live_objects_.reset(new SpaceSetMap("large live objects"));
+  mark_objects_.reset(new SpaceSetMap("large marked objects"));
+}
+
+void LargeObjectSpace::CopyLiveToMarked() {
+  mark_objects_->CopyFrom(*live_objects_.get());
+}
+
+LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
+    : LargeObjectSpace(name),
+      lock_("large object space lock", kAllocSpaceLock) {
+}
+
+LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
+  return new LargeObjectMapSpace(name);
+}
+
+Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes) {
+  MemMap* mem_map = MemMap::MapAnonymous("allocation", NULL, num_bytes, PROT_READ | PROT_WRITE);
+  if (mem_map == NULL) {
+    return NULL;
+  }
+  MutexLock mu(self, lock_);
+  Object* obj = reinterpret_cast<Object*>(mem_map->Begin());
+  large_objects_.push_back(obj);
+  mem_maps_.Put(obj, mem_map);
+  num_bytes_allocated_ += mem_map->Size();
+  ++num_objects_allocated_;
+  return obj;
+}
+
+void LargeObjectMapSpace::Free(Thread* self, Object* ptr) {
+  MutexLock mu(self, lock_);
+  MemMaps::iterator found = mem_maps_.find(ptr);
+  CHECK(found != mem_maps_.end()) << "Attempted to free large object which was not live";
+  DCHECK_GE(num_bytes_allocated_, found->second->Size());
+  num_bytes_allocated_ -= found->second->Size();
+  --num_objects_allocated_;
+  delete found->second;
+  mem_maps_.erase(found);
+}
+
+size_t LargeObjectMapSpace::AllocationSize(const Object* obj) {
+  MutexLock mu(Thread::Current(), lock_);
+  MemMaps::iterator found = mem_maps_.find(const_cast<Object*>(obj));
+  CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
+  return found->second->Size();
+}
+
+void LargeObjectMapSpace::Walk(AllocSpace::WalkCallback callback, void* arg) {
+  MutexLock mu(Thread::Current(), lock_);
+  for (MemMaps::iterator it = mem_maps_.begin(); it != mem_maps_.end(); ++it) {
+    MemMap* mem_map = it->second;
+    callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
+    callback(NULL, NULL, 0, arg);
+  }
+}
+
+bool LargeObjectMapSpace::Contains(const Object* obj) const {
+  MutexLock mu(Thread::Current(), lock_);
+  return mem_maps_.find(const_cast<Object*>(obj)) != mem_maps_.end();
+}
+
+FreeListSpace* FreeListSpace::Create(const std::string& name, byte* requested_begin, size_t size) {
+  CHECK(size % kAlignment == 0);
+  MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
+                                         PROT_READ | PROT_WRITE);
+  CHECK(mem_map != NULL) << "Failed to allocate large object space mem map";
+  return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
+}
+
+FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end)
+    : LargeObjectSpace(name),
+      begin_(begin),
+      end_(end),
+      mem_map_(mem_map),
+      lock_("free list space lock", kAllocSpaceLock) {
+  chunks_.resize(Size() / kAlignment + 1);
+  // Add a dummy chunk so we don't need to handle chunks having no next chunk.
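+  // (GetNextChunk of the last real chunk then lands on this dummy instead of walking off the
+  // end of chunks_.)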
+  chunks_.back().SetSize(kAlignment, false);
+  // Start out with one large free chunk.
+  AddFreeChunk(begin_, end_ - begin_, NULL);
+}
+
+FreeListSpace::~FreeListSpace() {
+}
+
+void FreeListSpace::AddFreeChunk(void* address, size_t size, Chunk* previous) {
+  Chunk* chunk = ChunkFromAddr(address);
+  chunk->SetSize(size, true);
+  chunk->SetPrevious(previous);
+  Chunk* next_chunk = GetNextChunk(chunk);
+  next_chunk->SetPrevious(chunk);
+  free_chunks_.insert(chunk);
+}
+
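+// Illustration of the mapping below (kAlignment is kPageSize; 4K pages are assumed only for the
+// numbers in this example): chunks_[i] shadows the kAlignment-sized block at
+// Begin() + i * kAlignment, so for a space beginning at 0x40000000 an object at 0x40002000 maps
+// to chunks_[2], and AddrFromChunk on that entry maps back to 0x40002000.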
+FreeListSpace::Chunk* FreeListSpace::ChunkFromAddr(void* address) {
+  size_t offset = reinterpret_cast<byte*>(address) - Begin();
+  DCHECK(IsAligned<kAlignment>(offset));
+  DCHECK_LT(offset, Size());
+  return &chunks_[offset / kAlignment];
+}
+
+void* FreeListSpace::AddrFromChunk(Chunk* chunk) {
+  return reinterpret_cast<void*>(Begin() + (chunk - &chunks_.front()) * kAlignment);
+}
+
+void FreeListSpace::RemoveFreeChunk(Chunk* chunk) {
+  // TODO: C++0x
+  // TODO: Improve performance, this might be slow.
+  std::pair<FreeChunks::iterator, FreeChunks::iterator> range = free_chunks_.equal_range(chunk);
+  for (FreeChunks::iterator it = range.first; it != range.second; ++it) {
+    if (*it == chunk) {
+      free_chunks_.erase(it);
+      return;
+    }
+  }
+}
+
+void FreeListSpace::Walk(AllocSpace::WalkCallback callback, void* arg) {
+  MutexLock mu(Thread::Current(), lock_);
+  for (Chunk* chunk = &chunks_.front(); chunk < &chunks_.back(); ) {
+    if (!chunk->IsFree()) {
+      size_t size = chunk->GetSize();
+      void* begin = AddrFromChunk(chunk);
+      void* end = reinterpret_cast<void*>(reinterpret_cast<byte*>(begin) + size);
+      callback(begin, end, size, arg);
+      callback(NULL, NULL, 0, arg);
+    }
+    chunk = GetNextChunk(chunk);
+  }
+}
+
+void FreeListSpace::Free(Thread* self, Object* obj) {
+  MutexLock mu(self, lock_);
+  CHECK(Contains(obj));
+  // Check adjacent chunks to see if we need to combine.
+  Chunk* chunk = ChunkFromAddr(obj);
+  CHECK(!chunk->IsFree());
+
+  size_t allocation_size = chunk->GetSize();
+  madvise(obj, allocation_size, MADV_DONTNEED);
+  num_objects_allocated_--;
+  num_bytes_allocated_ -= allocation_size;
+  Chunk* prev = chunk->GetPrevious();
+  Chunk* next = GetNextChunk(chunk);
+
+  // Combine any adjacent free chunks
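+  // For example, freeing B in [A free][B in use][C free] removes both A and C from free_chunks_
+  // and re-adds a single free chunk at A's address spanning all three.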
+  size_t extra_size = chunk->GetSize();
+  if (next->IsFree()) {
+    extra_size += next->GetSize();
+    RemoveFreeChunk(next);
+  }
+  if (prev != NULL && prev->IsFree()) {
+    RemoveFreeChunk(prev);
+    AddFreeChunk(AddrFromChunk(prev), prev->GetSize() + extra_size, prev->GetPrevious());
+  } else {
+    AddFreeChunk(AddrFromChunk(chunk), extra_size, prev);
+  }
+}
+
+bool FreeListSpace::Contains(const Object* obj) const {
+  return mem_map_->HasAddress(obj);
+}
+
+FreeListSpace::Chunk* FreeListSpace::GetNextChunk(Chunk* chunk) {
+  return chunk + chunk->GetSize() / kAlignment;
+}
+
+size_t FreeListSpace::AllocationSize(const Object* obj) {
+  Chunk* chunk = ChunkFromAddr(const_cast<Object*>(obj));
+  CHECK(!chunk->IsFree());
+  return chunk->GetSize();
+}
+
+Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes) {
+  MutexLock mu(self, lock_);
+  num_bytes = RoundUp(num_bytes, kAlignment);
+  Chunk temp;
+  temp.SetSize(num_bytes);
+  // Find the smallest chunk at least num_bytes in size.
+  FreeChunks::iterator found = free_chunks_.lower_bound(&temp);
+  if (found == free_chunks_.end()) {
+    // Out of memory, or too much fragmentation.
+    return NULL;
+  }
+  Chunk* chunk = *found;
+  free_chunks_.erase(found);
+  CHECK(chunk->IsFree());
+  void* addr = AddrFromChunk(chunk);
+  size_t chunk_size = chunk->GetSize();
+  chunk->SetSize(num_bytes);
+  if (chunk_size > num_bytes) {
+    // Split the chunk into two chunks.
+    Chunk* new_chunk = GetNextChunk(chunk);
+    AddFreeChunk(AddrFromChunk(new_chunk), chunk_size - num_bytes, chunk);
+  }
+
+  num_objects_allocated_++;
+  num_bytes_allocated_ += num_bytes;
+  return reinterpret_cast<Object*>(addr);
+}
+
+void FreeListSpace::FreeList(Thread* self, size_t num_ptrs, Object** ptrs) {
+  for (size_t i = 0; i < num_ptrs; ++i) {
+    Free(self, ptrs[i]);
+  }
+}
+
+}  // namespace art
diff --git a/src/gc/space.h b/src/gc/space.h
new file mode 100644
index 0000000..a543500
--- /dev/null
+++ b/src/gc/space.h
@@ -0,0 +1,581 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_SPACE_H_
+#define ART_SRC_SPACE_H_
+
+#include <string>
+
+#include "../mutex.h"
+#include "UniquePtr.h"
+#include "globals.h"
+#include "image.h"
+#include "macros.h"
+#include "dlmalloc.h"
+#include "mem_map.h"
+
+namespace art {
+
+class AllocSpace;
+class ImageSpace;
+class LargeObjectSpace;
+class Object;
+class SpaceBitmap;
+
+enum GcRetentionPolicy {
+  kGcRetentionPolicyNeverCollect,
+  kGcRetentionPolicyAlwaysCollect,
+  kGcRetentionPolicyFullCollect, // Collect only for full GC
+};
+std::ostream& operator<<(std::ostream& os, const GcRetentionPolicy& policy);
+
+enum SpaceType {
+  kSpaceTypeImageSpace,
+  kSpaceTypeAllocSpace,
+  kSpaceTypeZygoteSpace,
+  kSpaceTypeLargeObjectSpace,
+};
+std::ostream& operator<<(std::ostream& os, const SpaceType& space_type);
+
+// A space contains memory allocated for managed objects.
+class Space {
+ public:
+  virtual bool CanAllocateInto() const = 0;
+  virtual bool IsCompactible() const = 0;
+  virtual bool Contains(const Object* obj) const = 0;
+  virtual SpaceType GetType() const = 0;
+
+  const std::string& GetName() const {
+    return name_;
+  }
+
+  GcRetentionPolicy GetGcRetentionPolicy() const {
+    return gc_retention_policy_;
+  }
+
+  void SetGcRetentionPolicy(GcRetentionPolicy gc_retention_policy) {
+    gc_retention_policy_ = gc_retention_policy;
+  }
+
+  ImageSpace* AsImageSpace() {
+    DCHECK_EQ(GetType(), kSpaceTypeImageSpace);
+    return down_cast<ImageSpace*>(this);
+  }
+
+  AllocSpace* AsAllocSpace() {
+    DCHECK_EQ(GetType(), kSpaceTypeAllocSpace);
+    return down_cast<AllocSpace*>(this);
+  }
+
+  AllocSpace* AsZygoteSpace() {
+    DCHECK_EQ(GetType(), kSpaceTypeZygoteSpace);
+    return down_cast<AllocSpace*>(this);
+  }
+
+  LargeObjectSpace* AsLargeObjectSpace() {
+    DCHECK_EQ(GetType(), kSpaceTypeLargeObjectSpace);
+    return down_cast<LargeObjectSpace*>(this);
+  }
+
+  bool IsImageSpace() const {
+    return GetType() == kSpaceTypeImageSpace;
+  }
+
+  bool IsAllocSpace() const {
+    return GetType() == kSpaceTypeAllocSpace || GetType() == kSpaceTypeZygoteSpace;
+  }
+
+  bool IsZygoteSpace() const {
+    return GetType() == kSpaceTypeZygoteSpace;
+  }
+
+  bool IsLargeObjectSpace() const {
+    return GetType() == kSpaceTypeLargeObjectSpace;
+  }
+
+  virtual void Dump(std::ostream& /* os */) const { }
+
+  virtual ~Space() {}
+
+ protected:
+  Space(const std::string& name, GcRetentionPolicy gc_retention_policy);
+
+  // Name of the space.
+  std::string name_;
+
+  // Garbage collection retention policy, used to figure out when we should sweep over this space.
+  GcRetentionPolicy gc_retention_policy_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(Space);
+};
+
+// Continuous spaces have bitmaps, and an address range.
+class ContinuousSpace : public Space {
+ public:
+  // Address at which the space begins
+  byte* Begin() const {
+    return begin_;
+  }
+
+  // Address at which the space ends, which may vary as the space is filled.
+  byte* End() const {
+    return end_;
+  }
+
+  // Current size of space
+  size_t Size() const {
+    return End() - Begin();
+  }
+
+  virtual SpaceBitmap* GetLiveBitmap() const = 0;
+  virtual SpaceBitmap* GetMarkBitmap() const = 0;
+
+  // Is object within this space?
+  bool HasAddress(const Object* obj) const {
+    const byte* byte_ptr = reinterpret_cast<const byte*>(obj);
+    return Begin() <= byte_ptr && byte_ptr < End();
+  }
+
+  virtual bool Contains(const Object* obj) const {
+    return HasAddress(obj);
+  }
+
+  virtual ~ContinuousSpace() {}
+
+ protected:
+  ContinuousSpace(const std::string& name, byte* begin, byte* end,
+                  GcRetentionPolicy gc_retention_policy);
+
+  // The beginning of the storage for fast access.
+  byte* begin_;
+
+  // Current end of the space.
+  byte* end_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ContinuousSpace);
+};
+
+class DiscontinuousSpace : public Space {
+ public:
+  // Is object within this space?
+  virtual bool Contains(const Object* obj) const = 0;
+
+ protected:
+  DiscontinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(DiscontinuousSpace);
+};
+
+std::ostream& operator<<(std::ostream& os, const Space& space);
+
+class MemMapSpace : public ContinuousSpace {
+ public:
+  // Maximum which the mapped space can grow to.
+  virtual size_t Capacity() const {
+    return mem_map_->Size();
+  }
+
+  // Size of the space without a limit on its growth. By default this is just the Capacity, but
+  // for the allocation space we support starting with a small heap and then extending it.
+  virtual size_t NonGrowthLimitCapacity() const {
+    return Capacity();
+  }
+
+ protected:
+  MemMapSpace(const std::string& name, MemMap* mem_map, size_t initial_size,
+              GcRetentionPolicy gc_retention_policy);
+
+  MemMap* GetMemMap() {
+    return mem_map_.get();
+  }
+
+  const MemMap* GetMemMap() const {
+    return mem_map_.get();
+  }
+
+ private:
+  // Underlying storage of the space
+  UniquePtr<MemMap> mem_map_;
+
+  DISALLOW_COPY_AND_ASSIGN(MemMapSpace);
+};
+
+// An alloc space is a space where objects may be allocated and garbage collected.
+class AllocSpace : public MemMapSpace {
+ public:
+  typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
+
+  virtual bool CanAllocateInto() const {
+    return true;
+  }
+
+  virtual bool IsCompactible() const {
+    return false;
+  }
+
+  virtual SpaceType GetType() const {
+    return kSpaceTypeAllocSpace;
+  }
+
+  // Create an AllocSpace with the requested sizes. The requested base address is not guaranteed
+  // to be granted; if it is required, the caller should call Begin on the returned space to
+  // confirm the request was granted.
+  static AllocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
+                            size_t capacity, byte* requested_begin);
+
+  // Allocate num_bytes allowing the underlying mspace to grow.
+  virtual Object* AllocWithGrowth(Thread* self, size_t num_bytes);
+
+  // Allocate num_bytes without allowing the underlying mspace to grow.
+  virtual Object* AllocWithoutGrowth(Thread* self, size_t num_bytes);
+
+  // Return the storage space required by obj.
+  virtual size_t AllocationSize(const Object* obj);
+  virtual void Free(Thread* self, Object* ptr);
+  virtual void FreeList(Thread* self, size_t num_ptrs, Object** ptrs);
+
+  void* MoreCore(intptr_t increment);
+
+  void* GetMspace() const {
+    return mspace_;
+  }
+
+  // Hands unused pages back to the system.
+  void Trim();
+
+  // Perform a mspace_inspect_all which calls back for each allocation chunk. The chunk may not be
+  // in use, indicated by num_bytes equaling zero.
+  void Walk(WalkCallback callback, void* arg);
+
+  // Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore.
+  size_t GetFootprintLimit();
+
+  // Set the maximum number of bytes that the heap is allowed to obtain from the system via
+  // MoreCore. Note this is used to stop the mspace growing beyond the limit imposed by
+  // Capacity(). When allocations fail we GC before increasing the footprint limit and allowing
+  // the mspace to grow.
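+  // A rough sketch of that pattern (the exact call sequence lives in Heap and is only assumed
+  // here for illustration):
+  //   // ... run a GC first ...
+  //   space->SetFootprintLimit(space->Capacity());   // then allow the mspace to reach Capacity()
+  //   Object* obj = space->AllocWithGrowth(self, num_bytes);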
+  void SetFootprintLimit(size_t limit);
+
+  // Removes the fork time growth limit on capacity, allowing the application to allocate up to the
+  // maximum reserved size of the heap.
+  void ClearGrowthLimit() {
+    growth_limit_ = NonGrowthLimitCapacity();
+  }
+
+  // Override capacity so that we only return the possibly limited capacity
+  virtual size_t Capacity() const {
+    return growth_limit_;
+  }
+
+  // The total amount of memory reserved for the alloc space
+  virtual size_t NonGrowthLimitCapacity() const {
+    return GetMemMap()->Size();
+  }
+
+  virtual SpaceBitmap* GetLiveBitmap() const {
+    return live_bitmap_.get();
+  }
+
+  virtual SpaceBitmap* GetMarkBitmap() const {
+    return mark_bitmap_.get();
+  }
+
+  virtual void Dump(std::ostream& os) const;
+
+  void SetGrowthLimit(size_t growth_limit);
+
+  // Swap the live and mark bitmaps of this space. This is used by the GC for concurrent sweeping.
+  virtual void SwapBitmaps();
+
+  // Turn ourself into a zygote space and return a new alloc space which has our unused memory.
+  AllocSpace* CreateZygoteSpace();
+
+  size_t GetNumBytesAllocated() const {
+    return num_bytes_allocated_;
+  }
+
+  size_t GetNumObjectsAllocated() const {
+    return num_objects_allocated_;
+  }
+
+ private:
+  Object* AllocWithoutGrowthLocked(size_t num_bytes) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+  UniquePtr<SpaceBitmap> live_bitmap_;
+  UniquePtr<SpaceBitmap> mark_bitmap_;
+  UniquePtr<SpaceBitmap> temp_bitmap_;
+
+  // Approximate number of bytes which have been allocated into the space.
+  size_t num_bytes_allocated_;
+  size_t num_objects_allocated_;
+
+  static size_t bitmap_index_;
+
+  AllocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin, byte* end,
+             size_t growth_limit);
+
+  bool Init(size_t initial_size, size_t maximum_size, size_t growth_size, byte* requested_base);
+
+  static void* CreateMallocSpace(void* base, size_t morecore_start, size_t initial_size);
+
+  // The boundary tag overhead.
+  static const size_t kChunkOverhead = kWordSize;
+
+  // Used to ensure mutual exclusion when the allocation spaces data structures are being modified.
+  Mutex lock_;
+
+  // Underlying malloc space
+  void* const mspace_;
+
+  // The capacity of the alloc space until such time that ClearGrowthLimit is called.
+  // The underlying mem_map_ controls the maximum size we allow the heap to grow to. The growth
+  // limit is a value less than or equal to the mem_map_ capacity, used for ergonomic reasons
+  // because of the zygote. Prior to forking the zygote the heap will have a maximally sized
+  // mem_map_, but the growth_limit_ will be set to a lower value. The growth_limit_ is used as
+  // the capacity of the alloc space; unlike a normal capacity it can change, but only once, when
+  // ClearGrowthLimit is called.
+  size_t growth_limit_;
+
+  friend class MarkSweep;
+
+  DISALLOW_COPY_AND_ASSIGN(AllocSpace);
+};
+
+// An image space is a space backed with a memory mapped image
+class ImageSpace : public MemMapSpace {
+ public:
+  virtual bool CanAllocateInto() const {
+    return false;
+  }
+
+  virtual bool IsCompactible() const {
+    return false;
+  }
+
+  virtual SpaceType GetType() const {
+    return kSpaceTypeImageSpace;
+  }
+
+  // Create a Space from an image file. The returned space cannot be allocated into or collected.
+  static ImageSpace* Create(const std::string& image)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  const ImageHeader& GetImageHeader() const {
+    return *reinterpret_cast<ImageHeader*>(Begin());
+  }
+
+  const std::string& GetImageFilename() const {
+    return GetName();
+  }
+
+  // Mark the objects defined in this space in the given live bitmap
+  void RecordImageAllocations(SpaceBitmap* live_bitmap) const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  virtual SpaceBitmap* GetLiveBitmap() const {
+    return live_bitmap_.get();
+  }
+
+  virtual SpaceBitmap* GetMarkBitmap() const {
+    // ImageSpaces have the same bitmap for both live and marked. This helps reduce the number of
+    // special cases to test against.
+    return live_bitmap_.get();
+  }
+
+  virtual void Dump(std::ostream& os) const;
+
+ private:
+  friend class Space;
+
+  UniquePtr<SpaceBitmap> live_bitmap_;
+  static size_t bitmap_index_;
+
+  ImageSpace(const std::string& name, MemMap* mem_map);
+
+  DISALLOW_COPY_AND_ASSIGN(ImageSpace);
+};
+
+class LargeObjectSpace : public DiscontinuousSpace {
+ public:
+  virtual bool CanAllocateInto() const {
+    return true;
+  }
+
+  virtual bool IsCompactible() const {
+    return true;
+  }
+
+  virtual SpaceType GetType() const {
+    return kSpaceTypeLargeObjectSpace;
+  }
+
+  virtual SpaceSetMap* GetLiveObjects() const {
+    return live_objects_.get();
+  }
+
+  virtual SpaceSetMap* GetMarkObjects() const {
+    return mark_objects_.get();
+  }
+
+  virtual void SwapBitmaps();
+  virtual void CopyLiveToMarked();
+
+  // Return the storage space required by obj.
+  virtual size_t AllocationSize(const Object* obj) = 0;
+  virtual Object* Alloc(Thread* self, size_t num_bytes) = 0;
+  virtual void Free(Thread* self, Object* ptr) = 0;
+  virtual void Walk(AllocSpace::WalkCallback, void* arg) = 0;
+
+  virtual ~LargeObjectSpace() {}
+
+  size_t GetNumBytesAllocated() const {
+    return num_bytes_allocated_;
+  }
+
+  size_t GetNumObjectsAllocated() const {
+    return num_objects_allocated_;
+  }
+
+ protected:
+
+  LargeObjectSpace(const std::string& name);
+
+  // Approximate number of bytes which have been allocated into the space.
+  size_t num_bytes_allocated_;
+  size_t num_objects_allocated_;
+
+  UniquePtr<SpaceSetMap> live_objects_;
+  UniquePtr<SpaceSetMap> mark_objects_;
+
+  friend class Space;
+};
+
+class LargeObjectMapSpace : public LargeObjectSpace {
+ public:
+
+  // Creates a large object space. Allocations into the large object space use memory maps instead
+  // of malloc.
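+  // A minimal usage sketch (self and num_bytes are illustrative placeholders):
+  //   LargeObjectMapSpace* los = LargeObjectMapSpace::Create("large object space");
+  //   Object* obj = los->Alloc(self, num_bytes);  // backed by a dedicated anonymous MemMap
+  //   los->Free(self, obj);                       // deletes (and thus unmaps) that MemMap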
+  static LargeObjectMapSpace* Create(const std::string& name);
+
+  // Return the storage space required by obj.
+  virtual size_t AllocationSize(const Object* obj);
+  virtual Object* Alloc(Thread* self, size_t num_bytes);
+  virtual void Free(Thread* self, Object* ptr);
+  virtual void Walk(AllocSpace::WalkCallback, void* arg);
+  virtual bool Contains(const Object* obj) const;
+ private:
+  LargeObjectMapSpace(const std::string& name);
+  virtual ~LargeObjectMapSpace() {}
+
+  // Used to ensure mutual exclusion when the allocation spaces data structures are being modified.
+  mutable Mutex lock_;
+  std::vector<Object*> large_objects_;
+  typedef SafeMap<Object*, MemMap*> MemMaps;
+  MemMaps mem_maps_;
+};
+
+class FreeListSpace : public LargeObjectSpace {
+ public:
+  virtual ~FreeListSpace();
+  static FreeListSpace* Create(const std::string& name, byte* requested_begin, size_t capacity);
+
+  virtual size_t AllocationSize(const Object* obj);
+  virtual Object* Alloc(Thread* self, size_t num_bytes);
+  virtual void Free(Thread* self, Object* obj);
+  virtual void FreeList(Thread* self, size_t num_ptrs, Object** ptrs);
+  virtual bool Contains(const Object* obj) const;
+  virtual void Walk(AllocSpace::WalkCallback callback, void* arg);
+
+  // Address at which the space begins
+  byte* Begin() const {
+    return begin_;
+  }
+
+  // Address at which the space ends, which may vary as the space is filled.
+  byte* End() const {
+    return end_;
+  }
+
+  // Current size of space
+  size_t Size() const {
+    return End() - Begin();
+  }
+ private:
+  static const size_t kAlignment = kPageSize;
+
+  class Chunk {
+   public:
+    static const size_t kFreeFlag = 0x80000000;
+
+    struct SortBySize {
+      bool operator()(const Chunk* a, const Chunk* b) const {
+        return a->GetSize() < b->GetSize();
+      }
+    };
+
+    bool IsFree() const {
+      return (m_size & kFreeFlag) != 0;
+    }
+
+    void SetSize(size_t size, bool is_free = false) {
+      m_size = size | (is_free ? kFreeFlag : 0);
+    }
+
+    size_t GetSize() const {
+      return m_size & (kFreeFlag - 1);
+    }
+
+    Chunk* GetPrevious() {
+      return m_previous;
+    }
+
+    void SetPrevious(Chunk* previous) {
+      m_previous = previous;
+      DCHECK(m_previous == NULL ||
+            (m_previous != NULL && m_previous + m_previous->GetSize() / kAlignment == this));
+    }
+   private:
+    size_t m_size;
+    Chunk* m_previous;
+  };
+
+  FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end);
+  void AddFreeChunk(void* address, size_t size, Chunk* previous);
+  Chunk* ChunkFromAddr(void* address);
+  void* AddrFromChunk(Chunk* chunk);
+  void RemoveFreeChunk(Chunk* chunk);
+  Chunk* GetNextChunk(Chunk* chunk);
+
+  typedef std::multiset<Chunk*, Chunk::SortBySize> FreeChunks;
+  byte* begin_;
+  byte* end_;
+  UniquePtr<MemMap> mem_map_;
+  Mutex lock_;
+  std::vector<Chunk> chunks_;
+  FreeChunks free_chunks_;
+};
+
+// Callback for dlmalloc_inspect_all or mspace_inspect_all that will madvise(2) unused
+// pages back to the kernel.
+void MspaceMadviseCallback(void* start, void* end, size_t used_bytes, void* /*arg*/);
+
+}  // namespace art
+
+#endif  // ART_SRC_SPACE_H_
diff --git a/src/gc/space_bitmap.cc b/src/gc/space_bitmap.cc
new file mode 100644
index 0000000..273ae4f
--- /dev/null
+++ b/src/gc/space_bitmap.cc
@@ -0,0 +1,286 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "heap_bitmap.h"
+
+#include "logging.h"
+#include "UniquePtr.h"
+#include "utils.h"
+
+namespace art {
+
+std::string SpaceBitmap::GetName() const {
+  return name_;
+}
+
+void SpaceBitmap::SetName(const std::string& name) {
+  name_ = name;
+}
+
+void SpaceSetMap::Walk(SpaceBitmap::Callback* callback, void* arg) {
+  for (Objects::iterator it = contained_.begin(); it != contained_.end(); ++it) {
+    callback(const_cast<Object*>(*it), arg);
+  }
+}
+
+SpaceBitmap* SpaceBitmap::Create(const std::string& name, byte* heap_begin, size_t heap_capacity) {
+  CHECK(heap_begin != NULL);
+  // Round up since heap_capacity is not necessarily a multiple of kAlignment * kBitsPerWord.
+  size_t bitmap_size = OffsetToIndex(RoundUp(heap_capacity, kAlignment * kBitsPerWord)) * kWordSize;
+  UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), NULL, bitmap_size, PROT_READ | PROT_WRITE));
+  if (mem_map.get() == NULL) {
+    LOG(ERROR) << "Failed to allocate bitmap " << name;
+    return NULL;
+  }
+  word* bitmap_begin = reinterpret_cast<word*>(mem_map->Begin());
+  return new SpaceBitmap(name, mem_map.release(), bitmap_begin, bitmap_size, heap_begin);
+}
+
+// Clean up any resources associated with the bitmap.
+SpaceBitmap::~SpaceBitmap() {
+}
+
+void SpaceBitmap::SetHeapLimit(uintptr_t new_end) {
+  DCHECK(IsAligned<kBitsPerWord * kAlignment>(new_end));
+  size_t new_size = OffsetToIndex(new_end - heap_begin_) * kWordSize;
+  if (new_size < bitmap_size_) {
+    bitmap_size_ = new_size;
+  }
+  // Not sure if doing this trim is necessary, since nothing past the end of the heap capacity
+  // should be marked.
+  // TODO: Fix this code; it is broken and causes rare heap corruption!
+  // mem_map_->Trim(reinterpret_cast<byte*>(heap_begin_ + bitmap_size_));
+}
+
+// Fill the bitmap with zeroes.  Returns the bitmap's memory to the
+// system as a side-effect.
+void SpaceBitmap::Clear() {
+  if (bitmap_begin_ != NULL) {
+    // This returns the memory to the system.  Successive page faults
+    // will return zeroed memory.
+    int result = madvise(bitmap_begin_, bitmap_size_, MADV_DONTNEED);
+    if (result == -1) {
+      PLOG(FATAL) << "madvise failed";
+    }
+  }
+}
+
+void SpaceBitmap::CopyFrom(SpaceBitmap* source_bitmap) {
+  DCHECK_EQ(Size(), source_bitmap->Size());
+  std::copy(source_bitmap->Begin(), source_bitmap->Begin() + source_bitmap->Size() / kWordSize, Begin());
+}
+
+// Visits set bits in address order.  The callback is not permitted to
+// change the bitmap bits or max during the traversal.
+void SpaceBitmap::Walk(SpaceBitmap::Callback* callback, void* arg) {
+  CHECK(bitmap_begin_ != NULL);
+  CHECK(callback != NULL);
+
+  uintptr_t end = OffsetToIndex(HeapLimit() - heap_begin_ - 1);
+  word* bitmap_begin = bitmap_begin_;
+  for (uintptr_t i = 0; i <= end; ++i) {
+    word w = bitmap_begin[i];
+    if (w != 0) {
+      uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
+      do {
+        const size_t shift = CLZ(w);
+        Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
+        (*callback)(obj, arg);
+        w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
+      } while (w != 0);
+    }
+  }
+}
+
+// Walk through the bitmaps in increasing address order, and find the
+// object pointers that correspond to garbage objects.  Call
+// <callback> zero or more times with lists of these object pointers.
+//
+// The callback is not permitted to increase the max of either bitmap.
+void SpaceBitmap::SweepWalk(const SpaceBitmap& live_bitmap,
+                           const SpaceBitmap& mark_bitmap,
+                           uintptr_t sweep_begin, uintptr_t sweep_end,
+                           SpaceBitmap::SweepCallback* callback, void* arg) {
+  CHECK(live_bitmap.bitmap_begin_ != NULL);
+  CHECK(mark_bitmap.bitmap_begin_ != NULL);
+  CHECK_EQ(live_bitmap.heap_begin_, mark_bitmap.heap_begin_);
+  CHECK_EQ(live_bitmap.bitmap_size_, mark_bitmap.bitmap_size_);
+  CHECK(callback != NULL);
+  CHECK_LE(sweep_begin, sweep_end);
+  CHECK_GE(sweep_begin, live_bitmap.heap_begin_);
+
+  if (sweep_end <= sweep_begin) {
+    return;
+  }
+
+  // TODO: rewrite the callbacks to accept a std::vector<Object*> rather than an Object**?
+  const size_t buffer_size = kWordSize * kBitsPerWord;
+  Object* pointer_buf[buffer_size];
+  Object** pb = &pointer_buf[0];
+  size_t start = OffsetToIndex(sweep_begin - live_bitmap.heap_begin_);
+  size_t end = OffsetToIndex(sweep_end - live_bitmap.heap_begin_ - 1);
+  CHECK_LT(end, live_bitmap.Size() / kWordSize);
+  word* live = live_bitmap.bitmap_begin_;
+  word* mark = mark_bitmap.bitmap_begin_;
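+  // In each word, "live & ~mark" has a bit set for every object that was live before the GC but
+  // not marked during it, i.e. garbage. For example live = 0b1100 and mark = 0b0100 gives
+  // garbage = 0b1000, and CLZ then recovers each garbage object's offset in address order.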
+  for (size_t i = start; i <= end; i++) {
+    word garbage = live[i] & ~mark[i];
+    if (UNLIKELY(garbage != 0)) {
+      uintptr_t ptr_base = IndexToOffset(i) + live_bitmap.heap_begin_;
+      do {
+        const size_t shift = CLZ(garbage);
+        garbage ^= static_cast<size_t>(kWordHighBitMask) >> shift;
+        *pb++ = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
+      } while (garbage != 0);
+      // Make sure that there are always enough slots available for an
+      // entire word of one bits.
+      if (pb >= &pointer_buf[buffer_size - kBitsPerWord]) {
+        (*callback)(pb - &pointer_buf[0], &pointer_buf[0], arg);
+        pb = &pointer_buf[0];
+      }
+    }
+  }
+  if (pb > &pointer_buf[0]) {
+    (*callback)(pb - &pointer_buf[0], &pointer_buf[0], arg);
+  }
+}
+
+}  // namespace art
+
+// Support needed for in order traversal
+#include "object.h"
+#include "object_utils.h"
+
+namespace art {
+
+static void WalkFieldsInOrder(SpaceBitmap* visited, SpaceBitmap::Callback* callback, Object* obj,
+                              void* arg);
+
+// Walk instance fields of the given Class. Separate function to allow recursion on the super
+// class.
+static void WalkInstanceFields(SpaceBitmap* visited, SpaceBitmap::Callback* callback, Object* obj,
+                               Class* klass, void* arg)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  // Visit fields of parent classes first.
+  Class* super = klass->GetSuperClass();
+  if (super != NULL) {
+    WalkInstanceFields(visited, callback, obj, super, arg);
+  }
+  // Walk instance fields
+  ObjectArray<Field>* fields = klass->GetIFields();
+  if (fields != NULL) {
+    for (int32_t i = 0; i < fields->GetLength(); i++) {
+      Field* field = fields->Get(i);
+      FieldHelper fh(field);
+      if (!fh.IsPrimitiveType()) {
+        Object* value = field->GetObj(obj);
+        if (value != NULL) {
+          WalkFieldsInOrder(visited, callback, value,  arg);
+        }
+      }
+    }
+  }
+}
+
+// For an unvisited object, visit it then all its children found via fields.
+static void WalkFieldsInOrder(SpaceBitmap* visited, SpaceBitmap::Callback* callback, Object* obj,
+                              void* arg)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  if (visited->Test(obj)) {
+    return;
+  }
+  // visit the object itself
+  (*callback)(obj, arg);
+  visited->Set(obj);
+  // Walk instance fields of all objects
+  Class* klass = obj->GetClass();
+  WalkInstanceFields(visited, callback, obj, klass, arg);
+  // Walk static fields of a Class
+  if (obj->IsClass()) {
+    ObjectArray<Field>* fields = klass->GetSFields();
+    if (fields != NULL) {
+      for (int32_t i = 0; i < fields->GetLength(); i++) {
+        Field* field = fields->Get(i);
+        FieldHelper fh(field);
+        if (!fh.IsPrimitiveType()) {
+          Object* value = field->GetObj(NULL);
+          if (value != NULL) {
+            WalkFieldsInOrder(visited, callback, value, arg);
+          }
+        }
+      }
+    }
+  } else if (obj->IsObjectArray()) {
+    // Walk elements of an object array
+    ObjectArray<Object>* obj_array = obj->AsObjectArray<Object>();
+    int32_t length = obj_array->GetLength();
+    for (int32_t i = 0; i < length; i++) {
+      Object* value = obj_array->Get(i);
+      if (value != NULL) {
+        WalkFieldsInOrder(visited, callback, value, arg);
+      }
+    }
+  }
+}
+
+// Visits set bits with an in order traversal.  The callback is not permitted to change the bitmap
+// bits or max during the traversal.
+void SpaceBitmap::InOrderWalk(SpaceBitmap::Callback* callback, void* arg) {
+  UniquePtr<SpaceBitmap> visited(Create("bitmap for in-order walk",
+                                       reinterpret_cast<byte*>(heap_begin_),
+                                       IndexToOffset(bitmap_size_ / kWordSize)));
+  CHECK(bitmap_begin_ != NULL);
+  CHECK(callback != NULL);
+  uintptr_t end = Size() / kWordSize;
+  for (uintptr_t i = 0; i < end; ++i) {
+    word w = bitmap_begin_[i];
+    if (UNLIKELY(w != 0)) {
+      uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
+      while (w != 0) {
+        const size_t shift = CLZ(w);
+        Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
+        WalkFieldsInOrder(visited.get(), callback, obj, arg);
+        w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
+      }
+    }
+  }
+}
+
+std::string SpaceSetMap::GetName() const {
+  return name_;
+}
+
+void SpaceSetMap::SetName(const std::string& name) {
+  name_ = name;
+}
+
+SpaceSetMap::SpaceSetMap(const std::string& name) : name_(name) {
+}
+
+void SpaceSetMap::CopyFrom(const SpaceSetMap& space_set) {
+  contained_ = space_set.contained_;
+}
+
+std::ostream& operator << (std::ostream& stream, const SpaceBitmap& bitmap) {
+  return stream
+    << bitmap.GetName() << "["
+    << "begin=" << reinterpret_cast<const void*>(bitmap.HeapBegin())
+    << ",end=" << reinterpret_cast<const void*>(bitmap.HeapLimit())
+    << "]";
+}
+
+}  // namespace art
diff --git a/src/gc/space_bitmap.h b/src/gc/space_bitmap.h
new file mode 100644
index 0000000..885491f
--- /dev/null
+++ b/src/gc/space_bitmap.h
@@ -0,0 +1,329 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_SPACE_BITMAP_H_
+#define ART_SRC_SPACE_BITMAP_H_
+
+#include <limits.h>
+#include <set>
+#include <stdint.h>
+#include <vector>
+
+#include "UniquePtr.h"
+#include "globals.h"
+#include "logging.h"
+#include "mem_map.h"
+#include "utils.h"
+
+namespace art {
+
+class Object;
+
+class SpaceBitmap {
+ public:
+  static const size_t kAlignment = 8;
+
+  typedef void Callback(Object* obj, void* arg);
+
+  typedef void ScanCallback(Object* obj, void* finger, void* arg);
+
+  typedef void SweepCallback(size_t ptr_count, Object** ptrs, void* arg);
+
+  // Initialize a SpaceBitmap so that it points to a bitmap large enough to cover a heap at
+  // heap_begin of heap_capacity bytes, where objects are guaranteed to be kAlignment-aligned.
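+  // A minimal usage sketch (heap_begin, heap_capacity and obj are illustrative placeholders):
+  //   UniquePtr<SpaceBitmap> bitmap(SpaceBitmap::Create("live", heap_begin, heap_capacity));
+  //   bitmap->Set(obj);         // mark the object
+  //   CHECK(bitmap->Test(obj));
+  //   bitmap->Clear(obj);       // unmark it again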
+  static SpaceBitmap* Create(const std::string& name, byte* heap_begin, size_t heap_capacity);
+
+  ~SpaceBitmap();
+
+  // <offset> is the difference from .base to a pointer address.
+  // <index> is the index of .bits that contains the bit representing
+  //         <offset>.
+  static size_t OffsetToIndex(size_t offset) {
+      return offset / kAlignment / kBitsPerWord;
+  }
+
+  static uintptr_t IndexToOffset(size_t index) {
+    return static_cast<uintptr_t>(index * kAlignment * kBitsPerWord);
+  }
+
+  // Pack the bits in backwards so they come out in address order when using CLZ.
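+  // Worked example (assuming 32-bit words): offset 0 maps to the most significant bit of word 0,
+  // offset kAlignment maps to the next bit down, and so on. CLZ of a non-zero word therefore
+  // gives the shift of the lowest-addressed marked object in that word, which is what lets
+  // Walk() and SweepWalk() visit objects in increasing address order.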
+  static word OffsetToMask(uintptr_t offset_) {
+    return static_cast<uintptr_t>(kWordHighBitMask) >> ((offset_ / kAlignment) % kBitsPerWord);
+  }
+
+  inline void Set(const Object* obj) {
+    Modify(obj, true);
+  }
+
+  inline void Clear(const Object* obj) {
+    Modify(obj, false);
+  }
+
+  void Clear();
+
+  inline bool Test(const Object* obj) const {
+    uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
+    DCHECK(HasAddress(obj)) << obj;
+    DCHECK(bitmap_begin_ != NULL);
+    DCHECK_GE(addr, heap_begin_);
+    const uintptr_t offset = addr - heap_begin_;
+    return (bitmap_begin_[OffsetToIndex(offset)] & OffsetToMask(offset)) != 0;
+  }
+
+  // Return true iff <obj> is within the range of pointers that this bitmap could potentially cover,
+  // even if a bit has not been set for it.
+  bool HasAddress(const void* obj) const {
+    // If obj < heap_begin_ then offset underflows to some very large value past the end of the
+    // bitmap.
+    const uintptr_t offset = (uintptr_t)obj - heap_begin_;
+    const size_t index = OffsetToIndex(offset);
+    return index < bitmap_size_ / kWordSize;
+  }
+
+  void VisitRange(uintptr_t base, uintptr_t max, Callback* visitor, void* arg) const;
+
+  class ClearVisitor {
+   public:
+    explicit ClearVisitor(SpaceBitmap* const bitmap)
+        : bitmap_(bitmap) {
+    }
+
+    void operator ()(Object* obj) const {
+      bitmap_->Clear(obj);
+    }
+   private:
+    SpaceBitmap* const bitmap_;
+  };
+
+  template <typename Visitor>
+  void VisitRange(uintptr_t visit_begin, uintptr_t visit_end, const Visitor& visitor) const {
+    for (; visit_begin < visit_end; visit_begin += kAlignment ) {
+      visitor(reinterpret_cast<Object*>(visit_begin));
+    }
+  }
+
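+  // Visits marked objects in [visit_begin, visit_end), one bitmap word at a time: before a
+  // word's marked objects are passed to visitor, finger_visitor is called with the address just
+  // past that word's span. The left and right edge words are masked so that objects outside the
+  // requested range are skipped.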
+  template <typename Visitor, typename FingerVisitor>
+  void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end,
+                        const Visitor& visitor, const FingerVisitor& finger_visitor) const
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+    DCHECK_LT(visit_begin, visit_end);
+
+    const size_t word_span = kAlignment * kBitsPerWord; // Equals IndexToOffset(1).
+    const size_t bit_index_start = (visit_begin - heap_begin_) / kAlignment;
+    const size_t bit_index_end = (visit_end - heap_begin_ - 1) / kAlignment;
+
+    size_t word_start = bit_index_start / kBitsPerWord;
+    size_t word_end = bit_index_end / kBitsPerWord;
+    DCHECK_LT(word_end * kWordSize, Size());
+
+    // Handle the left edge word first as a special case: load it and trim off the high bits
+    // that correspond to objects before visit_begin.
+    size_t edge_word = bitmap_begin_[word_start];
+    size_t left_bits = bit_index_start & (kBitsPerWord - 1);
+    if (left_bits != 0) {
+      edge_word &= (1 << (kBitsPerWord - left_bits)) - 1;
+    }
+
+    // If word_start == word_end then handle this case at the same place we handle the right edge.
+    if (edge_word != 0 && word_start < word_end) {
+      uintptr_t ptr_base = IndexToOffset(word_start) + heap_begin_;
+      finger_visitor(reinterpret_cast<void*>(ptr_base + word_span));
+      do {
+        const size_t shift = CLZ(edge_word);
+        Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
+        visitor(obj);
+        edge_word ^= static_cast<size_t>(kWordHighBitMask) >> shift;
+      } while (edge_word != 0);
+    }
+    word_start++;
+
+    for (size_t i = word_start; i < word_end; i++) {
+      size_t w = bitmap_begin_[i];
+      if (w != 0) {
+        uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
+        finger_visitor(reinterpret_cast<void*>(ptr_base + word_span));
+        do {
+          const size_t shift = CLZ(w);
+          Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
+          visitor(obj);
+          w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
+        } while (w != 0);
+      }
+    }
+
+    // Handle the right edge, and also the left edge if both edges are on the same word.
+    size_t right_bits = bit_index_end & (kBitsPerWord - 1);
+
+    // If word_start == word_end then we need to use the word from which we removed the left bits.
+    if (word_start <= word_end) {
+      edge_word = bitmap_begin_[word_end];
+    }
+
+    // Bits that we trim off the right.
+    edge_word &= ~((static_cast<size_t>(kWordHighBitMask) >> right_bits) - 1);
+    uintptr_t ptr_base = IndexToOffset(word_end) + heap_begin_;
+    finger_visitor(reinterpret_cast<void*>(ptr_base + word_span));
+    while (edge_word != 0) {
+      const size_t shift = CLZ(edge_word);
+      Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
+      visitor(obj);
+      edge_word ^= static_cast<size_t>(kWordHighBitMask) >> shift;
+    }
+  }
+
+  void Walk(Callback* callback, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  void InOrderWalk(Callback* callback, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  static void SweepWalk(const SpaceBitmap& live,
+                        const SpaceBitmap& mark,
+                        uintptr_t base, uintptr_t max,
+                        SweepCallback* thunk, void* arg);
+
+  void CopyFrom(SpaceBitmap* source_bitmap);
+
+  // Starting address of our internal storage.
+  word* Begin() {
+    return bitmap_begin_;
+  }
+
+  // Size of our internal storage
+  size_t Size() const {
+    return bitmap_size_;
+  }
+
+  // Size in bytes of the memory that the bitmaps spans.
+  size_t HeapSize() const {
+    return IndexToOffset(Size() / kWordSize);
+  }
+
+  uintptr_t HeapBegin() const {
+    return heap_begin_;
+  }
+
+  // The maximum address which the bitmap can span. (HeapBegin() <= object < HeapLimit()).
+  uintptr_t HeapLimit() const {
+    return HeapBegin() + static_cast<uintptr_t>(HeapSize());
+  }
+
+  // Set the max address which can covered by the bitmap.
+  void SetHeapLimit(uintptr_t new_end);
+
+  std::string GetName() const;
+  void SetName(const std::string& name);
+
+ private:
+  // TODO: heap_end_ is initialized so that the heap bitmap is empty, this doesn't require the -1,
+  // however, we document that this is expected on heap_end_
+  SpaceBitmap(const std::string& name, MemMap* mem_map, word* bitmap_begin, size_t bitmap_size, const void* heap_begin)
+      : mem_map_(mem_map), bitmap_begin_(bitmap_begin), bitmap_size_(bitmap_size),
+        heap_begin_(reinterpret_cast<uintptr_t>(heap_begin)),
+        name_(name) {}
+
+  inline void Modify(const Object* obj, bool do_set) {
+    uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
+    DCHECK_GE(addr, heap_begin_);
+    const uintptr_t offset = addr - heap_begin_;
+    const size_t index = OffsetToIndex(offset);
+    const word mask = OffsetToMask(offset);
+    DCHECK_LT(index, bitmap_size_ / kWordSize) << " bitmap_size_ = " << bitmap_size_;
+    if (do_set) {
+      bitmap_begin_[index] |= mask;
+    } else {
+      bitmap_begin_[index] &= ~mask;
+    }
+  }
+
+  // Backing storage for bitmap.
+  UniquePtr<MemMap> mem_map_;
+
+  // This bitmap itself, word sized for efficiency in scanning.
+  word* const bitmap_begin_;
+
+  // Size of this bitmap.
+  size_t bitmap_size_;
+
+  // The base address of the heap, which corresponds to the word containing the first bit in the
+  // bitmap.
+  const uintptr_t heap_begin_;
+
+  // Name of this bitmap.
+  std::string name_;
+};
+
+// Like a bitmap except it keeps track of objects using sets.
+class SpaceSetMap {
+ public:
+  typedef std::set<const Object*> Objects;
+
+  bool IsEmpty() const {
+    return contained_.empty();
+  }
+
+  inline void Set(const Object* obj) {
+    contained_.insert(obj);
+  }
+
+  inline void Clear(const Object* obj) {
+    Objects::iterator found = contained_.find(obj);
+    if (found != contained_.end()) {
+      contained_.erase(found);
+    }
+  }
+
+  void Clear() {
+    contained_.clear();
+  }
+
+  inline bool Test(const Object* obj) const {
+    return contained_.find(obj) != contained_.end();
+  }
+
+  std::string GetName() const;
+  void SetName(const std::string& name);
+
+  void Walk(SpaceBitmap::Callback* callback, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  void CopyFrom(const SpaceSetMap& space_set);
+
+  template <typename Visitor>
+  void Visit(const Visitor& visitor) NO_THREAD_SAFETY_ANALYSIS {
+    for (Objects::iterator it = contained_.begin(); it != contained_.end(); ++it) {
+      visitor(*it);
+    }
+  }
+
+  SpaceSetMap(const std::string& name);
+
+  Objects& GetObjects() {
+    return contained_;
+  }
+
+ private:
+  std::string name_;
+  Objects contained_;
+};
+
+std::ostream& operator << (std::ostream& stream, const SpaceBitmap& bitmap);
+
+}  // namespace art
+
+#endif  // ART_SRC_SPACE_BITMAP_H_
diff --git a/src/gc/space_bitmap_test.cc b/src/gc/space_bitmap_test.cc
new file mode 100644
index 0000000..a2f1afc
--- /dev/null
+++ b/src/gc/space_bitmap_test.cc
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "space_bitmap.h"
+
+#include "common_test.h"
+#include "dlmalloc.h"
+#include "globals.h"
+#include "UniquePtr.h"
+
+#include <stdint.h>
+
+namespace art {
+
+class SpaceBitmapTest : public CommonTest {
+ public:
+};
+
+TEST_F(SpaceBitmapTest, Init) {
+  byte* heap_begin = reinterpret_cast<byte*>(0x10000000);
+  size_t heap_capacity = 16 * MB;
+  UniquePtr<SpaceBitmap> space_bitmap(SpaceBitmap::Create("test-bitmap",
+                                                          heap_begin, heap_capacity));
+  EXPECT_TRUE(space_bitmap.get() != NULL);
+}
+
+class BitmapVerify {
+ public:
+  BitmapVerify(SpaceBitmap* bitmap, const Object* begin, const Object* end)
+    : bitmap_(bitmap),
+      begin_(begin),
+      end_(end) {}
+
+  void operator ()(const Object* obj) {
+    EXPECT_TRUE(obj >= begin_);
+    EXPECT_TRUE(obj <= end_);
+    EXPECT_TRUE(bitmap_->Test(obj) == ((reinterpret_cast<uintptr_t>(obj) & 0xF) != 0));
+  }
+
+  SpaceBitmap* bitmap_;
+  const Object* begin_;
+  const Object* end_;
+};
+
+TEST_F(SpaceBitmapTest, ScanRange) {
+  byte* heap_begin = reinterpret_cast<byte*>(0x10000000);
+  size_t heap_capacity = 16 * MB;
+
+  UniquePtr<SpaceBitmap> space_bitmap(SpaceBitmap::Create("test-bitmap",
+                                                          heap_begin, heap_capacity));
+  EXPECT_TRUE(space_bitmap.get() != NULL);
+
+  // Set all the odd bits in the first BitsPerWord * 3 to one.
+  for (size_t j = 0; j < kBitsPerWord * 3; ++j) {
+    const Object* obj = reinterpret_cast<Object*>(heap_begin + j * SpaceBitmap::kAlignment);
+    if (reinterpret_cast<uintptr_t>(obj) & 0xF) {
+      space_bitmap->Set(obj);
+    }
+  }
+  // Try every possible starting bit in the first word. Then for each starting bit, try each
+  // possible length up to a maximum of kBitsPerWord * 2 - 1 bits.
+  // This handles all the cases, having runs which start and end on the same word, and different
+  // words.
+  for (size_t i = 0; i < static_cast<size_t>(kBitsPerWord); ++i) {
+    Object* start = reinterpret_cast<Object*>(heap_begin + i * SpaceBitmap::kAlignment);
+    for (size_t j = 0; j < static_cast<size_t>(kBitsPerWord * 2); ++j) {
+      Object* end = reinterpret_cast<Object*>(heap_begin + (i + j) * SpaceBitmap::kAlignment);
+      BitmapVerify(space_bitmap.get(), start, end);
+    }
+  }
+}
+
+}  // namespace art
diff --git a/src/gc/space_test.cc b/src/gc/space_test.cc
new file mode 100644
index 0000000..0319147
--- /dev/null
+++ b/src/gc/space_test.cc
@@ -0,0 +1,422 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "space.h"
+
+#include "common_test.h"
+#include "dlmalloc.h"
+#include "globals.h"
+#include "UniquePtr.h"
+
+#include <stdint.h>
+
+namespace art {
+
+class SpaceTest : public CommonTest {
+ public:
+  void SizeFootPrintGrowthLimitAndTrimBody(AllocSpace* space, intptr_t object_size,
+                                           int round, size_t growth_limit);
+  void SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size);
+};
+
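+// These cases exercise AllocSpace::Create with various (initial size, growth limit, capacity)
+// triples; creation is expected to succeed only when initial size <= growth limit <= capacity.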
+TEST_F(SpaceTest, Init) {
+  {
+    // Init < max == growth
+    UniquePtr<Space> space(AllocSpace::Create("test", 16 * MB, 32 * MB, 32 * MB, NULL));
+    EXPECT_TRUE(space.get() != NULL);
+  }
+  {
+    // Init == max == growth
+    UniquePtr<Space> space(AllocSpace::Create("test", 16 * MB, 16 * MB, 16 * MB, NULL));
+    EXPECT_TRUE(space.get() != NULL);
+  }
+  {
+    // Init > max == growth
+    UniquePtr<Space> space(AllocSpace::Create("test", 32 * MB, 16 * MB, 16 * MB, NULL));
+    EXPECT_TRUE(space.get() == NULL);
+  }
+  {
+    // Growth == init < max
+    UniquePtr<Space> space(AllocSpace::Create("test", 16 * MB, 16 * MB, 32 * MB, NULL));
+    EXPECT_TRUE(space.get() != NULL);
+  }
+  {
+    // Growth < init < max
+    UniquePtr<Space> space(AllocSpace::Create("test", 16 * MB, 8 * MB, 32 * MB, NULL));
+    EXPECT_TRUE(space.get() == NULL);
+  }
+  {
+    // Init < growth < max
+    UniquePtr<Space> space(AllocSpace::Create("test", 8 * MB, 16 * MB, 32 * MB, NULL));
+    EXPECT_TRUE(space.get() != NULL);
+  }
+  {
+    // Init < max < growth
+    UniquePtr<Space> space(AllocSpace::Create("test", 8 * MB, 32 * MB, 16 * MB, NULL));
+    EXPECT_TRUE(space.get() == NULL);
+  }
+}
+
+// TODO: This test is not very good; we should improve it.
+// The test should do more allocations before the creation of the ZygoteSpace, and then do
+// allocations after the ZygoteSpace is created. The test should also do some GCs to ensure that
+// the GC works with the ZygoteSpace.
+TEST_F(SpaceTest, ZygoteSpace) {
+  AllocSpace* space(AllocSpace::Create("test", 4 * MB, 16 * MB, 16 * MB, NULL));
+  ASSERT_TRUE(space != NULL);
+
+  // Make the space findable to the heap; it will also be deleted when the runtime is cleaned up.
+  Runtime::Current()->GetHeap()->AddSpace(space);
+  Thread* self = Thread::Current();
+
+  // Succeeds, fits without adjusting the footprint limit.
+  Object* ptr1 = space->AllocWithoutGrowth(self, 1 * MB);
+  EXPECT_TRUE(ptr1 != NULL);
+
+  // Fails, requires a higher footprint limit.
+  Object* ptr2 = space->AllocWithoutGrowth(self, 8 * MB);
+  EXPECT_TRUE(ptr2 == NULL);
+
+  // Succeeds, adjusts the footprint.
+  Object* ptr3 = space->AllocWithGrowth(self, 8 * MB);
+  EXPECT_TRUE(ptr3 != NULL);
+
+  // Fails, requires a higher footprint limit.
+  Object* ptr4 = space->AllocWithoutGrowth(self, 8 * MB);
+  EXPECT_TRUE(ptr4 == NULL);
+
+  // Also fails, requires a higher allowed footprint.
+  Object* ptr5 = space->AllocWithGrowth(self, 8 * MB);
+  EXPECT_TRUE(ptr5 == NULL);
+
+  // Release some memory.
+  size_t free3 = space->AllocationSize(ptr3);
+  space->Free(self, ptr3);
+  EXPECT_LE(8U * MB, free3);
+
+  // Succeeds, now that memory has been freed.
+  void* ptr6 = space->AllocWithGrowth(self, 9 * MB);
+  EXPECT_TRUE(ptr6 != NULL);
+
+  // Final clean up.
+  size_t free1 = space->AllocationSize(ptr1);
+  space->Free(self, ptr1);
+  EXPECT_LE(1U * MB, free1);
+
+  // Make sure that the zygote space isn't directly at the start of the space.
+  space->AllocWithoutGrowth(self, 1U * MB);
+  space = space->CreateZygoteSpace();
+
+  // Make the space findable to the heap; it will also be deleted when the runtime is cleaned up.
+  Runtime::Current()->GetHeap()->AddSpace(space);
+
+  // Succeeds, fits without adjusting the footprint limit.
+  ptr1 = space->AllocWithoutGrowth(self, 1 * MB);
+  EXPECT_TRUE(ptr1 != NULL);
+
+  // Fails, requires a higher footprint limit.
+  ptr2 = space->AllocWithoutGrowth(self, 8 * MB);
+  EXPECT_TRUE(ptr2 == NULL);
+
+  // Succeeds, adjusts the footprint.
+  ptr3 = space->AllocWithGrowth(self, 2 * MB);
+  EXPECT_TRUE(ptr3 != NULL);
+  space->Free(self, ptr3);
+
+  // Final clean up.
+  free1 = space->AllocationSize(ptr1);
+  space->Free(self, ptr1);
+  EXPECT_LE(1U * MB, free1);
+}
+
+TEST_F(SpaceTest, AllocAndFree) {
+  AllocSpace* space(AllocSpace::Create("test", 4 * MB, 16 * MB, 16 * MB, NULL));
+  ASSERT_TRUE(space != NULL);
+  Thread* self = Thread::Current();
+
+  // Make the space findable to the heap; it will also be deleted when the runtime is cleaned up.
+  Runtime::Current()->GetHeap()->AddSpace(space);
+
+  // Succeeds, fits without adjusting the footprint limit.
+  Object* ptr1 = space->AllocWithoutGrowth(self, 1 * MB);
+  EXPECT_TRUE(ptr1 != NULL);
+
+  // Fails, requires a higher footprint limit.
+  Object* ptr2 = space->AllocWithoutGrowth(self, 8 * MB);
+  EXPECT_TRUE(ptr2 == NULL);
+
+  // Succeeds, adjusts the footprint.
+  Object* ptr3 = space->AllocWithGrowth(self, 8 * MB);
+  EXPECT_TRUE(ptr3 != NULL);
+
+  // Fails, requires a higher footprint limit.
+  Object* ptr4 = space->AllocWithoutGrowth(self, 8 * MB);
+  EXPECT_TRUE(ptr4 == NULL);
+
+  // Also fails, requires a higher allowed footprint.
+  Object* ptr5 = space->AllocWithGrowth(self, 8 * MB);
+  EXPECT_TRUE(ptr5 == NULL);
+
+  // Release some memory.
+  size_t free3 = space->AllocationSize(ptr3);
+  space->Free(self, ptr3);
+  EXPECT_LE(8U * MB, free3);
+
+  // Succeeds, now that memory has been freed.
+  void* ptr6 = space->AllocWithGrowth(self, 9 * MB);
+  EXPECT_TRUE(ptr6 != NULL);
+
+  // Final clean up.
+  size_t free1 = space->AllocationSize(ptr1);
+  space->Free(self, ptr1);
+  EXPECT_LE(1U * MB, free1);
+}
+
+TEST_F(SpaceTest, AllocAndFreeList) {
+  AllocSpace* space(AllocSpace::Create("test", 4 * MB, 16 * MB, 16 * MB, NULL));
+  ASSERT_TRUE(space != NULL);
+
+  // Make the space findable to the heap; it will also be deleted when the runtime is cleaned up.
+  Runtime::Current()->GetHeap()->AddSpace(space);
+  Thread* self = Thread::Current();
+
+  // Succeeds, fits without adjusting the max allowed footprint.
+  Object* lots_of_objects[1024];
+  for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
+    lots_of_objects[i] = space->AllocWithoutGrowth(self, 16);
+    EXPECT_TRUE(lots_of_objects[i] != NULL);
+  }
+
+  // Release memory and check pointers are NULL
+  space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
+  for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
+    EXPECT_TRUE(lots_of_objects[i] == NULL);
+  }
+
+  // Succeeds, fits by adjusting the max allowed footprint.
+  for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
+    lots_of_objects[i] = space->AllocWithGrowth(self, 1024);
+    EXPECT_TRUE(lots_of_objects[i] != NULL);
+  }
+
+  // Release memory and check pointers are NULL
+  space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
+  for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
+    EXPECT_TRUE(lots_of_objects[i] == NULL);
+  }
+}
+
+static size_t test_rand() {
+  // TODO: replace this with something random yet deterministic
+  return rand();
+}
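+
+// One way the TODO above could be addressed (a sketch only; the function name is hypothetical,
+// the constants are the common Numerical Recipes LCG parameters and the seed is arbitrary):
+// a small linear congruential generator is deterministic across runs yet reasonably varied.
+//
+//   static size_t test_rand_deterministic() {
+//     static uint32_t state = 0x1234567u;      // fixed seed => reproducible test runs
+//     state = state * 1664525u + 1013904223u;  // LCG step
+//     return static_cast<size_t>(state);
+//   }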
+
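+// Stress-tests allocation, free and trim behaviour of an AllocSpace under the given per-round
+// growth limit. A positive object_size requests fixed-size allocations; a negative object_size
+// requests pseudo-random sizes up to -object_size bytes (with a minimum of 8). Rounds <= 1
+// allocate with AllocWithoutGrowth, later rounds with AllocWithGrowth.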
+void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(AllocSpace* space, intptr_t object_size,
+                                                    int round, size_t growth_limit) {
+  if ((object_size > 0 && object_size >= static_cast<intptr_t>(growth_limit)) ||
+      (object_size < 0 && -object_size >= static_cast<intptr_t>(growth_limit))) {
+    // No allocation can succeed
+    return;
+  }
+  // Mspace for raw dlmalloc operations
+  void* mspace = space->GetMspace();
+
+  // The mspace's footprint equals the amount of memory requested from the system.
+  size_t footprint = mspace_footprint(mspace);
+
+  // The mspace must at least have its bookkeeping allocated.
+  EXPECT_GT(footprint, 0u);
+
+  // But the footprint shouldn't exceed the growth limit for this round.
+  EXPECT_LE(footprint, growth_limit);
+
+  // The space's size shouldn't exceed the growth limit either.
+  EXPECT_LE(space->Size(), growth_limit);
+
+  // This invariant should always hold: otherwise the mspace has grown larger than the space
+  // believes its size to be, which breaks other invariants.
+  EXPECT_GE(space->Size(), footprint);
+
+  // Fill the space with lots of small objects up to the growth limit
+  size_t max_objects = (growth_limit / (object_size > 0 ? object_size : 8)) + 1;
+  UniquePtr<Object*[]> lots_of_objects(new Object*[max_objects]);
+  size_t last_object = 0;  // index at which filling stopped (== number of objects allocated)
+  size_t amount_allocated = 0;  // amount of space allocated
+  Thread* self = Thread::Current();
+  for (size_t i = 0; i < max_objects; i++) {
+    size_t alloc_fails = 0;  // number of failed allocations
+    size_t max_fails = 30;  // number of times we fail allocation before giving up
+    for (; alloc_fails < max_fails; alloc_fails++) {
+      size_t alloc_size;
+      if (object_size > 0) {
+        alloc_size = object_size;
+      } else {
+        alloc_size = test_rand() % static_cast<size_t>(-object_size);
+        if (alloc_size < 8) {
+          alloc_size = 8;
+        }
+      }
+      Object* object;
+      if (round <= 1) {
+        object = space->AllocWithoutGrowth(self, alloc_size);
+      } else {
+        object = space->AllocWithGrowth(self, alloc_size);
+      }
+      footprint = mspace_footprint(mspace);
+      EXPECT_GE(space->Size(), footprint);  // invariant
+      if (object != NULL) {  // allocation succeeded
+        lots_of_objects.get()[i] = object;
+        size_t allocation_size = space->AllocationSize(object);
+        if (object_size > 0) {
+          EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
+        } else {
+          EXPECT_GE(allocation_size, 8u);
+        }
+        amount_allocated += allocation_size;
+        break;
+      }
+    }
+    if (alloc_fails == max_fails) {
+      last_object = i;
+      break;
+    }
+  }
+  CHECK_NE(last_object, 0u);  // we should have filled the space
+  EXPECT_GT(amount_allocated, 0u);
+
+  // We shouldn't have gone past the growth_limit
+  EXPECT_LE(amount_allocated, growth_limit);
+  EXPECT_LE(footprint, growth_limit);
+  EXPECT_LE(space->Size(), growth_limit);
+
+  // footprint and size should agree with amount allocated
+  EXPECT_GE(footprint, amount_allocated);
+  EXPECT_GE(space->Size(), amount_allocated);
+
+  // Release storage in a semi-ad-hoc manner.
+  size_t free_increment = 96;
+  while (true) {
+    // Give the space a haircut
+    space->Trim();
+
+    // Bounds sanity
+    footprint = mspace_footprint(mspace);
+    EXPECT_LE(amount_allocated, growth_limit);
+    EXPECT_GE(footprint, amount_allocated);
+    EXPECT_LE(footprint, growth_limit);
+    EXPECT_GE(space->Size(), amount_allocated);
+    EXPECT_LE(space->Size(), growth_limit);
+
+    if (free_increment == 0) {
+      break;
+    }
+
+    // Free some objects
+    for (size_t i = 0; i < last_object; i += free_increment) {
+      Object* object = lots_of_objects.get()[i];
+      if (object == NULL) {
+        continue;
+      }
+      size_t allocation_size = space->AllocationSize(object);
+      if (object_size > 0) {
+        EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
+      } else {
+        EXPECT_GE(allocation_size, 8u);
+      }
+      space->Free(self, object);
+      lots_of_objects.get()[i] = NULL;
+      amount_allocated -= allocation_size;
+      footprint = mspace_footprint(mspace);
+      EXPECT_GE(space->Size(), footprint);  // invariant
+    }
+
+    free_increment >>= 1;
+  }
+
+  // All memory was released; try a large allocation to check that freed memory is being coalesced.
+  Object* large_object;
+  size_t three_quarters_space = (growth_limit / 2) + (growth_limit / 4);
+  if (round <= 1) {
+    large_object = space->AllocWithoutGrowth(self, three_quarters_space);
+  } else {
+    large_object = space->AllocWithGrowth(self, three_quarters_space);
+  }
+  EXPECT_TRUE(large_object != NULL);
+
+  // Sanity check footprint
+  footprint = mspace_footprint(mspace);
+  EXPECT_LE(footprint, growth_limit);
+  EXPECT_GE(space->Size(), footprint);
+  EXPECT_LE(space->Size(), growth_limit);
+
+  // Clean up
+  space->Free(self, large_object);
+
+  // Sanity check footprint
+  footprint = mspace_footprint(mspace);
+  EXPECT_LE(footprint, growth_limit);
+  EXPECT_GE(space->Size(), footprint);
+  EXPECT_LE(space->Size(), growth_limit);
+}
+
+void SpaceTest::SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size) {
+  size_t initial_size = 4 * MB;
+  size_t growth_limit = 8 * MB;
+  size_t capacity = 16 * MB;
+  AllocSpace* space(AllocSpace::Create("test", initial_size, growth_limit, capacity, NULL));
+  ASSERT_TRUE(space != NULL);
+
+  // Basic sanity
+  EXPECT_EQ(space->Capacity(), growth_limit);
+  EXPECT_EQ(space->NonGrowthLimitCapacity(), capacity);
+
+  // Make the space findable to the heap; it will also be deleted when the runtime is cleaned up.
+  Runtime::Current()->GetHeap()->AddSpace(space);
+
+  // In this round we don't allocate with growth and therefore can't grow past the initial size.
+  // This effectively makes the growth limit equal to the initial size, so check against
+  // initial_size here.
+  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 1, initial_size);
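+  // In the second round we do allocate with growth, so the configured growth_limit applies.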
+  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 2, growth_limit);
+  // Remove growth limit
+  space->ClearGrowthLimit();
+  EXPECT_EQ(space->Capacity(), capacity);
+  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 3, capacity);
+}
+
+#define TEST_SizeFootPrintGrowthLimitAndTrim(name, size) \
+  TEST_F(SpaceTest, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_##name) { \
+    SizeFootPrintGrowthLimitAndTrimDriver(size); \
+  } \
+  TEST_F(SpaceTest, SizeFootPrintGrowthLimitAndTrim_RandomAllocationsWithMax_##name) { \
+    SizeFootPrintGrowthLimitAndTrimDriver(-size); \
+  }
+
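+// For reference, TEST_SizeFootPrintGrowthLimitAndTrim(16B, 16) expands to two test cases: one
+// driving fixed 16 byte allocations and one driving pseudo-random allocation sizes below
+// 16 bytes (via the negative object_size convention above).
+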
+// Each size test is its own test so that we get a fresh heap each time
+TEST_F(SpaceTest, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_8B) {
+  SizeFootPrintGrowthLimitAndTrimDriver(8);
+}
+TEST_SizeFootPrintGrowthLimitAndTrim(16B, 16)
+TEST_SizeFootPrintGrowthLimitAndTrim(24B, 24)
+TEST_SizeFootPrintGrowthLimitAndTrim(32B, 32)
+TEST_SizeFootPrintGrowthLimitAndTrim(64B, 64)
+TEST_SizeFootPrintGrowthLimitAndTrim(128B, 128)
+TEST_SizeFootPrintGrowthLimitAndTrim(1KB, 1 * KB)
+TEST_SizeFootPrintGrowthLimitAndTrim(4KB, 4 * KB)
+TEST_SizeFootPrintGrowthLimitAndTrim(1MB, 1 * MB)
+TEST_SizeFootPrintGrowthLimitAndTrim(4MB, 4 * MB)
+TEST_SizeFootPrintGrowthLimitAndTrim(8MB, 8 * MB)
+
+}  // namespace art