Fix and re-enable FreeList large object space for 64 bit

Not enabled on 32 bit due to virtual memory fragmentation concerns.
The new free list large object space keeps allocations page aligned by
storing its accounting data in a side table instead of in a header
placed in front of each object.

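To illustrate the approach (a standalone sketch, not the runtime code;
the names SideTable/SlotInfo and the base address are invented), the
accounting entry for a page aligned block lives in an array indexed by
(address - space begin) / kAlignment, mirroring what FreeListSpace's
AllocationInfo table does:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    namespace sketch {

    static constexpr size_t kAlignment = 4096;  // Stand-in for kPageSize.

    // Per-slot accounting record, kept outside object memory so the
    // objects themselves stay page aligned.
    struct SlotInfo {
      uint32_t prev_free = 0;   // Free run before this slot, in kAlignment units.
      uint32_t alloc_size = 0;  // Block starting at this slot, in kAlignment units.
    };

    class SideTable {
     public:
      SideTable(uintptr_t begin, size_t capacity)
          : begin_(begin), infos_(capacity / kAlignment) {}

      // Page aligned address of an allocation -> index of its accounting slot.
      size_t SlotIndexForAddress(uintptr_t address) const {
        assert(address >= begin_ && (address - begin_) % kAlignment == 0);
        return (address - begin_) / kAlignment;
      }

      // Inverse mapping, used when walking the table.
      uintptr_t AddressForSlotIndex(size_t slot) const {
        return begin_ + slot * kAlignment;
      }

      SlotInfo& InfoForAddress(uintptr_t address) {
        return infos_[SlotIndexForAddress(address)];
      }

     private:
      uintptr_t begin_;
      std::vector<SlotInfo> infos_;
    };

    }  // namespace sketch

    int main() {
      // Hypothetical space base; only used for arithmetic, never dereferenced.
      constexpr uintptr_t kSpaceBegin = 0x10000000;
      sketch::SideTable table(kSpaceBegin, 64 * sketch::kAlignment);

      const uintptr_t obj = kSpaceBegin;
      table.InfoForAddress(obj).alloc_size = 3;  // Record a three page allocation.

      // The accounting round-trips: address -> slot -> address.
      assert(table.AddressForSlotIndex(table.SlotIndexForAddress(obj)) == obj);
      return 0;
    }
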
(cherry picked from commit 66e222aa48e6d2fe4c78a1df938364b82bc83e72)

Change-Id: Idbcbe75cb86b6d9b3d8b20f3048631a48c511458
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 2048160..bb7da0d 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -83,8 +83,13 @@
 // relative to partial/full GC. This may be desirable since sticky GCs interfere less with mutator
 // threads (lower pauses, use less memory bandwidth).
 static constexpr double kStickyGcThroughputAdjustment = 1.0;
-// Whether or not we use the free list large object space.
+// Whether or not we use the free list large object space. Only use it if USE_ART_LOW_4G_ALLOCATOR
+// is set, since then every MemMap::MapAnonymous call has to use the slow msync loop.
+#if USE_ART_LOW_4G_ALLOCATOR
+static constexpr bool kUseFreeListSpaceForLOS = true;
+#else
 static constexpr bool kUseFreeListSpaceForLOS = false;
+#endif
 // Whether or not we compact the zygote in PreZygoteFork.
 static constexpr bool kCompactZygote = kMovingCollector;
 // How many reserve entries are at the end of the allocation stack, these are only needed if the
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 9742277..e775965 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -131,7 +131,6 @@
   static constexpr bool kMeasureAllocationTime = false;
   // Primitive arrays larger than this size are put in the large object space.
   static constexpr size_t kDefaultLargeObjectThreshold = 3 * kPageSize;
-
   static constexpr size_t kDefaultStartingSize = kPageSize;
   static constexpr size_t kDefaultInitialSize = 2 * MB;
   static constexpr size_t kDefaultMaximumSize = 256 * MB;
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index d5a03c6..2a43712 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -192,6 +192,96 @@
   }
 }
 
+// Keeps track of the size of each allocation, whether it is free, and the size of the free block
+// preceding it. Used to coalesce free blocks and find the best fit block for an allocation.
+class AllocationInfo {
+ public:
+  AllocationInfo() : prev_free_(0), alloc_size_(0) {
+  }
+  // Returns the allocation size in kAlignment (page sized) units.
+  size_t AlignSize() const {
+    return alloc_size_ & ~kFlagFree;
+  }
+  // Returns the allocation size in bytes.
+  size_t ByteSize() const {
+    return AlignSize() * FreeListSpace::kAlignment;
+  }
+  // Updates the allocation size and whether or not it is free.
+  void SetByteSize(size_t size, bool free) {
+    DCHECK_ALIGNED(size, FreeListSpace::kAlignment);
+    alloc_size_ = (size / FreeListSpace::kAlignment) | (free ? kFlagFree : 0U);
+  }
+  bool IsFree() const {
+    return (alloc_size_ & kFlagFree) != 0;
+  }
+  // Returns the allocation info of the block that follows this one (free or allocated).
+  AllocationInfo* GetNextInfo() {
+    return this + AlignSize();
+  }
+  const AllocationInfo* GetNextInfo() const {
+    return this + AlignSize();
+  }
+  // Returns the allocation info of the free block preceding this one, using prev_free_ to locate
+  // it. This is only used for coalescing, so it only needs to work when the previous block is
+  // actually free.
+  AllocationInfo* GetPrevFreeInfo() {
+    DCHECK_NE(prev_free_, 0U);
+    return this - prev_free_;
+  }
+  // Returns the address of the object associated with this allocation info.
+  mirror::Object* GetObjectAddress() {
+    return reinterpret_cast<mirror::Object*>(reinterpret_cast<uintptr_t>(this) + sizeof(*this));
+  }
+  // Returns the size of the free block preceding this allocation, in kAlignment units.
+  size_t GetPrevFree() const {
+    return prev_free_;
+  }
+  // Returns how many free bytes there are before this block.
+  size_t GetPrevFreeBytes() const {
+    return GetPrevFree() * FreeListSpace::kAlignment;
+  }
+  // Update the size of the free block prior to the allocation.
+  void SetPrevFreeBytes(size_t bytes) {
+    DCHECK_ALIGNED(bytes, FreeListSpace::kAlignment);
+    prev_free_ = bytes / FreeListSpace::kAlignment;
+  }
+
+ private:
+  // Each allocation has an AllocationInfo that records the size of the free block preceding it,
+  // which is what best fit allocation keys on. The free block set uses a total order, so the
+  // iterator for any allocation info pointer can be recovered with lower_bound.
+  static constexpr uint32_t kFlagFree = 0x8000000;  // Flag ORed into alloc_size_ for free blocks.
+  // Contains the size of the previous free block with kAlignment as the unit. If 0 then the
+  // allocation before us is not free.
+  // These variables are undefined in the middle of allocations / free blocks.
+  uint32_t prev_free_;
+  // Allocation size of this block in kAlignment units; kFlagFree is ORed in when it is free.
+  uint32_t alloc_size_;
+};
+
+size_t FreeListSpace::GetSlotIndexForAllocationInfo(const AllocationInfo* info) const {
+  DCHECK_GE(info, allocation_info_);
+  DCHECK_LT(info, reinterpret_cast<AllocationInfo*>(allocation_info_map_->End()));
+  return info - allocation_info_;
+}
+
+AllocationInfo* FreeListSpace::GetAllocationInfoForAddress(uintptr_t address) {
+  return &allocation_info_[GetSlotIndexForAddress(address)];
+}
+
+const AllocationInfo* FreeListSpace::GetAllocationInfoForAddress(uintptr_t address) const {
+  return &allocation_info_[GetSlotIndexForAddress(address)];
+}
+
+inline bool FreeListSpace::SortByPrevFree::operator()(const AllocationInfo* a,
+                                                      const AllocationInfo* b) const {
+  if (a->GetPrevFree() < b->GetPrevFree()) return true;
+  if (a->GetPrevFree() > b->GetPrevFree()) return false;
+  if (a->AlignSize() < b->AlignSize()) return true;
+  if (a->AlignSize() > b->AlignSize()) return false;
+  return reinterpret_cast<uintptr_t>(a) < reinterpret_cast<uintptr_t>(b);
+}
+
 FreeListSpace* FreeListSpace::Create(const std::string& name, byte* requested_begin, size_t size) {
   CHECK_EQ(size % kAlignment, 0U);
   std::string error_msg;
@@ -205,112 +295,113 @@
     : LargeObjectSpace(name, begin, end),
       mem_map_(mem_map),
       lock_("free list space lock", kAllocSpaceLock) {
-  free_end_ = end - begin;
+  const size_t space_capacity = end - begin;
+  free_end_ = space_capacity;
+  CHECK_ALIGNED(space_capacity, kAlignment);
+  const size_t alloc_info_size = sizeof(AllocationInfo) * (space_capacity / kAlignment);
+  std::string error_msg;
+  allocation_info_map_.reset(MemMap::MapAnonymous("large object free list space allocation info map",
+                                                  nullptr, alloc_info_size, PROT_READ | PROT_WRITE,
+                                                  false, &error_msg));
+  CHECK(allocation_info_map_.get() != nullptr) << "Failed to allocate allocation info map: "
+      << error_msg;
+  allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_->Begin());
 }
 
 FreeListSpace::~FreeListSpace() {}
 
 void FreeListSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
   MutexLock mu(Thread::Current(), lock_);
-  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
-  AllocationHeader* cur_header = reinterpret_cast<AllocationHeader*>(Begin());
-  while (reinterpret_cast<uintptr_t>(cur_header) < free_end_start) {
-    cur_header = cur_header->GetNextNonFree();
-    size_t alloc_size = cur_header->AllocationSize();
-    byte* byte_start = reinterpret_cast<byte*>(cur_header->GetObjectAddress());
-    byte* byte_end = byte_start + alloc_size - sizeof(AllocationHeader);
-    callback(byte_start, byte_end, alloc_size, arg);
-    callback(NULL, NULL, 0, arg);
-    cur_header = reinterpret_cast<AllocationHeader*>(byte_end);
+  const uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
+  AllocationInfo* cur_info = &allocation_info_[0];
+  const AllocationInfo* end_info = GetAllocationInfoForAddress(free_end_start);
+  while (cur_info < end_info) {
+    if (!cur_info->IsFree()) {
+      size_t alloc_size = cur_info->ByteSize();
+      byte* byte_start = reinterpret_cast<byte*>(GetAddressForAllocationInfo(cur_info));
+      byte* byte_end = byte_start + alloc_size;
+      callback(byte_start, byte_end, alloc_size, arg);
+      callback(nullptr, nullptr, 0, arg);
+    }
+    cur_info = cur_info->GetNextInfo();
   }
+  CHECK_EQ(cur_info, end_info);
 }
 
-void FreeListSpace::RemoveFreePrev(AllocationHeader* header) {
-  CHECK(!header->IsFree());
-  CHECK_GT(header->GetPrevFree(), size_t(0));
-  FreeBlocks::iterator found = free_blocks_.lower_bound(header);
-  CHECK(found != free_blocks_.end());
-  CHECK_EQ(*found, header);
-  free_blocks_.erase(found);
-}
-
-FreeListSpace::AllocationHeader* FreeListSpace::GetAllocationHeader(const mirror::Object* obj) {
-  DCHECK(Contains(obj));
-  return reinterpret_cast<AllocationHeader*>(reinterpret_cast<uintptr_t>(obj) -
-      sizeof(AllocationHeader));
-}
-
-FreeListSpace::AllocationHeader* FreeListSpace::AllocationHeader::GetNextNonFree() {
-  // We know that there has to be at least one object after us or else we would have
-  // coalesced with the free end region. May be worth investigating a better way to do this
-  // as it may be expensive for large allocations.
-  for (uintptr_t pos = reinterpret_cast<uintptr_t>(this);; pos += kAlignment) {
-    AllocationHeader* cur = reinterpret_cast<AllocationHeader*>(pos);
-    if (!cur->IsFree()) return cur;
-  }
+void FreeListSpace::RemoveFreePrev(AllocationInfo* info) {
+  CHECK_GT(info->GetPrevFree(), 0U);
+  auto it = free_blocks_.lower_bound(info);
+  CHECK(it != free_blocks_.end());
+  CHECK_EQ(*it, info);
+  free_blocks_.erase(it);
 }
 
 size_t FreeListSpace::Free(Thread* self, mirror::Object* obj) {
   MutexLock mu(self, lock_);
-  DCHECK(Contains(obj));
-  AllocationHeader* header = GetAllocationHeader(obj);
-  CHECK(IsAligned<kAlignment>(header));
-  size_t allocation_size = header->AllocationSize();
-  DCHECK_GT(allocation_size, size_t(0));
-  DCHECK(IsAligned<kAlignment>(allocation_size));
+  DCHECK(Contains(obj)) << reinterpret_cast<void*>(Begin()) << " " << obj << " "
+                        << reinterpret_cast<void*>(End());
+  DCHECK_ALIGNED(obj, kAlignment);
+  AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
+  DCHECK(!info->IsFree());
+  const size_t allocation_size = info->ByteSize();
+  DCHECK_GT(allocation_size, 0U);
+  DCHECK_ALIGNED(allocation_size, kAlignment);
+  info->SetByteSize(allocation_size, true);  // Mark as free.
   // Look at the next chunk.
-  AllocationHeader* next_header = header->GetNextAllocationHeader();
+  AllocationInfo* next_info = info->GetNextInfo();
   // Calculate the start of the end free block.
   uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
-  size_t header_prev_free = header->GetPrevFree();
+  size_t prev_free_bytes = info->GetPrevFreeBytes();
   size_t new_free_size = allocation_size;
-  if (header_prev_free) {
-    new_free_size += header_prev_free;
-    RemoveFreePrev(header);
+  if (prev_free_bytes != 0) {
+    // Coalesce with previous free chunk.
+    new_free_size += prev_free_bytes;
+    RemoveFreePrev(info);
+    info = info->GetPrevFreeInfo();
+    // The previous allocation info must not be free since we are supposed to always coalesce.
+    DCHECK_EQ(info->GetPrevFreeBytes(), 0U) << "Previous allocation was free";
   }
-  if (reinterpret_cast<uintptr_t>(next_header) >= free_end_start) {
+  uintptr_t next_addr = GetAddressForAllocationInfo(next_info);
+  if (next_addr >= free_end_start) {
     // Easy case, the next chunk is the end free region.
-    CHECK_EQ(reinterpret_cast<uintptr_t>(next_header), free_end_start);
+    CHECK_EQ(next_addr, free_end_start);
     free_end_ += new_free_size;
   } else {
-    AllocationHeader* new_free_header;
-    DCHECK(IsAligned<kAlignment>(next_header));
-    if (next_header->IsFree()) {
-      // Find the next chunk by reading each page until we hit one with non-zero chunk.
-      AllocationHeader* next_next_header = next_header->GetNextNonFree();
-      DCHECK(IsAligned<kAlignment>(next_next_header));
-      DCHECK(IsAligned<kAlignment>(next_next_header->AllocationSize()));
-      RemoveFreePrev(next_next_header);
-      new_free_header = next_next_header;
-      new_free_size += next_next_header->GetPrevFree();
+    AllocationInfo* new_free_info;
+    if (next_info->IsFree()) {
+      AllocationInfo* next_next_info = next_info->GetNextInfo();
+      // Next next info can't be free since we always coalesce.
+      DCHECK(!next_next_info->IsFree());
+      DCHECK(IsAligned<kAlignment>(next_next_info->ByteSize()));
+      new_free_info = next_next_info;
+      new_free_size += next_next_info->GetPrevFreeBytes();
+      RemoveFreePrev(next_next_info);
     } else {
-      new_free_header = next_header;
+      new_free_info = next_info;
     }
-    new_free_header->prev_free_ = new_free_size;
-    free_blocks_.insert(new_free_header);
+    new_free_info->SetPrevFreeBytes(new_free_size);
+    free_blocks_.insert(new_free_info);
+    info->SetByteSize(new_free_size, true);
+    DCHECK_EQ(info->GetNextInfo(), new_free_info);
   }
   --num_objects_allocated_;
   DCHECK_LE(allocation_size, num_bytes_allocated_);
   num_bytes_allocated_ -= allocation_size;
-  madvise(header, allocation_size, MADV_DONTNEED);
+  madvise(obj, allocation_size, MADV_DONTNEED);
   if (kIsDebugBuild) {
     // Can't disallow reads since we use them to find next chunks during coalescing.
-    mprotect(header, allocation_size, PROT_READ);
+    mprotect(obj, allocation_size, PROT_READ);
   }
   return allocation_size;
 }
 
-bool FreeListSpace::Contains(const mirror::Object* obj) const {
-  return mem_map_->HasAddress(obj);
-}
-
 size_t FreeListSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
-  AllocationHeader* header = GetAllocationHeader(obj);
   DCHECK(Contains(obj));
-  DCHECK(!header->IsFree());
-  size_t alloc_size = header->AllocationSize();
+  AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
+  DCHECK(!info->IsFree());
+  size_t alloc_size = info->ByteSize();
   if (usable_size != nullptr) {
-    *usable_size = alloc_size - sizeof(AllocationHeader);
+    *usable_size = alloc_size;
   }
   return alloc_size;
 }
@@ -318,56 +409,56 @@
 mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                      size_t* usable_size) {
   MutexLock mu(self, lock_);
-  size_t allocation_size = RoundUp(num_bytes + sizeof(AllocationHeader), kAlignment);
-  AllocationHeader temp;
-  temp.SetPrevFree(allocation_size);
-  temp.SetAllocationSize(0);
-  AllocationHeader* new_header;
+  const size_t allocation_size = RoundUp(num_bytes, kAlignment);
+  AllocationInfo temp_info;
+  temp_info.SetPrevFreeBytes(allocation_size);
+  temp_info.SetByteSize(0, false);
+  AllocationInfo* new_info;
   // Find the smallest chunk at least num_bytes in size.
-  FreeBlocks::iterator found = free_blocks_.lower_bound(&temp);
-  if (found != free_blocks_.end()) {
-    AllocationHeader* header = *found;
-    free_blocks_.erase(found);
-
-    // Fit our object in the previous free header space.
-    new_header = header->GetPrevFreeAllocationHeader();
-
-    // Remove the newly allocated block from the header and update the prev_free_.
-    header->prev_free_ -= allocation_size;
-    if (header->prev_free_ > 0) {
+  auto it = free_blocks_.lower_bound(&temp_info);
+  if (it != free_blocks_.end()) {
+    AllocationInfo* info = *it;
+    free_blocks_.erase(it);
+    // Fit our object in the previous allocation info free space.
+    new_info = info->GetPrevFreeInfo();
+    // Remove the newly allocated block from the info and update the prev_free_.
+    info->SetPrevFreeBytes(info->GetPrevFreeBytes() - allocation_size);
+    if (info->GetPrevFreeBytes() > 0) {
+      AllocationInfo* new_free = info - info->GetPrevFree();
+      new_free->SetPrevFreeBytes(0);
+      new_free->SetByteSize(info->GetPrevFreeBytes(), true);
       // If there is remaining space, insert back into the free set.
-      free_blocks_.insert(header);
+      free_blocks_.insert(info);
     }
   } else {
     // Try to steal some memory from the free space at the end of the space.
     if (LIKELY(free_end_ >= allocation_size)) {
       // Fit our object at the start of the end free block.
-      new_header = reinterpret_cast<AllocationHeader*>(end_ - free_end_);
+      new_info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(End()) - free_end_);
       free_end_ -= allocation_size;
     } else {
       return nullptr;
     }
   }
-
   DCHECK(bytes_allocated != nullptr);
   *bytes_allocated = allocation_size;
   if (usable_size != nullptr) {
-    *usable_size = allocation_size - sizeof(AllocationHeader);
+    *usable_size = allocation_size;
   }
   // Need to do these inside of the lock.
   ++num_objects_allocated_;
   ++total_objects_allocated_;
   num_bytes_allocated_ += allocation_size;
   total_bytes_allocated_ += allocation_size;
-
+  mirror::Object* obj = reinterpret_cast<mirror::Object*>(GetAddressForAllocationInfo(new_info));
   // We always put our object at the start of the free block, there can not be another free block
   // before it.
   if (kIsDebugBuild) {
-    mprotect(new_header, allocation_size, PROT_READ | PROT_WRITE);
+    mprotect(obj, allocation_size, PROT_READ | PROT_WRITE);
   }
-  new_header->SetPrevFree(0);
-  new_header->SetAllocationSize(allocation_size);
-  return new_header->GetObjectAddress();
+  new_info->SetPrevFreeBytes(0);
+  new_info->SetByteSize(allocation_size, false);
+  return obj;
 }
 
 void FreeListSpace::Dump(std::ostream& os) const {
@@ -376,21 +467,20 @@
      << " begin: " << reinterpret_cast<void*>(Begin())
      << " end: " << reinterpret_cast<void*>(End()) << "\n";
   uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
-  AllocationHeader* cur_header = reinterpret_cast<AllocationHeader*>(Begin());
-  while (reinterpret_cast<uintptr_t>(cur_header) < free_end_start) {
-    byte* free_start = reinterpret_cast<byte*>(cur_header);
-    cur_header = cur_header->GetNextNonFree();
-    byte* free_end = reinterpret_cast<byte*>(cur_header);
-    if (free_start != free_end) {
-      os << "Free block at address: " << reinterpret_cast<const void*>(free_start)
-         << " of length " << free_end - free_start << " bytes\n";
+  const AllocationInfo* cur_info =
+      GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(Begin()));
+  const AllocationInfo* end_info = GetAllocationInfoForAddress(free_end_start);
+  while (cur_info < end_info) {
+    size_t size = cur_info->ByteSize();
+    uintptr_t address = GetAddressForAllocationInfo(cur_info);
+    if (cur_info->IsFree()) {
+      os << "Free block at address: " << reinterpret_cast<const void*>(address)
+         << " of length " << size << " bytes\n";
+    } else {
+      os << "Large object at address: " << reinterpret_cast<const void*>(address)
+         << " of length " << size << " bytes\n";
     }
-    size_t alloc_size = cur_header->AllocationSize();
-    byte* byte_start = reinterpret_cast<byte*>(cur_header->GetObjectAddress());
-    byte* byte_end = byte_start + alloc_size - sizeof(AllocationHeader);
-    os << "Large object at address: " << reinterpret_cast<const void*>(free_start)
-       << " of length " << byte_end - byte_start << " bytes\n";
-    cur_header = reinterpret_cast<AllocationHeader*>(byte_end);
+    cur_info = cur_info->GetNextInfo();
   }
   if (free_end_) {
     os << "Free block at address: " << reinterpret_cast<const void*>(free_end_start)
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 09a0919..9d5e090 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -29,13 +29,14 @@
 namespace gc {
 namespace space {
 
+class AllocationInfo;
+
 // Abstraction implemented by all large object spaces.
 class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
  public:
   SpaceType GetType() const OVERRIDE {
     return kSpaceTypeLargeObjectSpace;
   }
-
   void SwapBitmaps();
   void CopyLiveToMarked();
   virtual void Walk(DlMallocSpace::WalkCallback, void* arg) = 0;
@@ -44,57 +45,53 @@
   uint64_t GetBytesAllocated() OVERRIDE {
     return num_bytes_allocated_;
   }
-
   uint64_t GetObjectsAllocated() OVERRIDE {
     return num_objects_allocated_;
   }
-
   uint64_t GetTotalBytesAllocated() const {
     return total_bytes_allocated_;
   }
-
   uint64_t GetTotalObjectsAllocated() const {
     return total_objects_allocated_;
   }
-
   size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE;
-
   // LargeObjectSpaces don't have thread local state.
   void RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
   }
   void RevokeAllThreadLocalBuffers() OVERRIDE {
   }
-
   bool IsAllocSpace() const OVERRIDE {
     return true;
   }
-
   AllocSpace* AsAllocSpace() OVERRIDE {
     return this;
   }
-
   collector::ObjectBytePair Sweep(bool swap_bitmaps);
-
   virtual bool CanMoveObjects() const OVERRIDE {
     return false;
   }
-
   // Current address at which the space begins, which may vary as the space is filled.
   byte* Begin() const {
     return begin_;
   }
-
   // Current address at which the space ends, which may vary as the space is filled.
   byte* End() const {
     return end_;
   }
-
+  // Returns the current size of the space.
+  size_t Size() const {
+    return End() - Begin();
+  }
+  // Returns true if obj's address is within this space.
+  bool Contains(const mirror::Object* obj) const {
+    const byte* byte_obj = reinterpret_cast<const byte*>(obj);
+    return Begin() <= byte_obj && byte_obj < End();
+  }
   void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  protected:
   explicit LargeObjectSpace(const std::string& name, byte* begin, byte* end);
-
   static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg);
 
   // Approximate number of bytes which have been allocated into the space.
@@ -102,7 +99,6 @@
   uint64_t num_objects_allocated_;
   uint64_t total_bytes_allocated_;
   uint64_t total_objects_allocated_;
-
   // Begin and end, may change as more large objects are allocated.
   byte* begin_;
   byte* end_;
@@ -119,7 +115,6 @@
   // Creates a large object space. Allocations into the large object space use memory maps instead
   // of malloc.
   static LargeObjectMapSpace* Create(const std::string& name);
-
   // Return the storage space required by obj.
   size_t AllocationSize(mirror::Object* obj, size_t* usable_size);
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
@@ -145,126 +140,52 @@
 // A continuous large object space with a free-list to handle holes.
 class FreeListSpace FINAL : public LargeObjectSpace {
  public:
+  static constexpr size_t kAlignment = kPageSize;
+
   virtual ~FreeListSpace();
   static FreeListSpace* Create(const std::string& name, byte* requested_begin, size_t capacity);
-
   size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
       EXCLUSIVE_LOCKS_REQUIRED(lock_);
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                         size_t* usable_size) OVERRIDE;
   size_t Free(Thread* self, mirror::Object* obj) OVERRIDE;
-  bool Contains(const mirror::Object* obj) const OVERRIDE;
   void Walk(DlMallocSpace::WalkCallback callback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_);
-
-  // Address at which the space begins.
-  byte* Begin() const {
-    return begin_;
-  }
-
-  // Address at which the space ends, which may vary as the space is filled.
-  byte* End() const {
-    return end_;
-  }
-
-  // Current size of space
-  size_t Size() const {
-    return End() - Begin();
-  }
-
   void Dump(std::ostream& os) const;
 
  protected:
-  static const size_t kAlignment = kPageSize;
-
-  class AllocationHeader {
-   public:
-    // Returns the allocation size, includes the header.
-    size_t AllocationSize() const {
-      return alloc_size_;
-    }
-
-    // Updates the allocation size in the header, the allocation size includes the header itself.
-    void SetAllocationSize(size_t size) {
-      DCHECK(IsAligned<kPageSize>(size));
-      alloc_size_ = size;
-    }
-
-    bool IsFree() const {
-      return AllocationSize() == 0;
-    }
-
-    // Returns the previous free allocation header by using the prev_free_ member to figure out
-    // where it is. If prev free is 0 then we just return ourself.
-    AllocationHeader* GetPrevFreeAllocationHeader() {
-      return reinterpret_cast<AllocationHeader*>(reinterpret_cast<uintptr_t>(this) - prev_free_);
-    }
-
-    // Returns the address of the object associated with this allocation header.
-    mirror::Object* GetObjectAddress() {
-      return reinterpret_cast<mirror::Object*>(reinterpret_cast<uintptr_t>(this) + sizeof(*this));
-    }
-
-    // Returns the next allocation header after the object associated with this allocation header.
-    AllocationHeader* GetNextAllocationHeader() {
-      DCHECK_NE(alloc_size_, 0U);
-      return reinterpret_cast<AllocationHeader*>(reinterpret_cast<uintptr_t>(this) + alloc_size_);
-    }
-
-    // Returns how many free bytes there is before the block.
-    size_t GetPrevFree() const {
-      return prev_free_;
-    }
-
-    // Update the size of the free block prior to the allocation.
-    void SetPrevFree(size_t prev_free) {
-      DCHECK(IsAligned<kPageSize>(prev_free));
-      prev_free_ = prev_free;
-    }
-
-    // Finds and returns the next non free allocation header after ourself.
-    // TODO: Optimize, currently O(n) for n free following pages.
-    AllocationHeader* GetNextNonFree();
-
-    // Used to implement best fit object allocation. Each allocation has an AllocationHeader which
-    // contains the size of the previous free block preceding it. Implemented in such a way that we
-    // can also find the iterator for any allocation header pointer.
-    class SortByPrevFree {
-     public:
-      bool operator()(const AllocationHeader* a, const AllocationHeader* b) const {
-        if (a->GetPrevFree() < b->GetPrevFree()) return true;
-        if (a->GetPrevFree() > b->GetPrevFree()) return false;
-        if (a->AllocationSize() < b->AllocationSize()) return true;
-        if (a->AllocationSize() > b->AllocationSize()) return false;
-        return reinterpret_cast<uintptr_t>(a) < reinterpret_cast<uintptr_t>(b);
-      }
-    };
-
-   private:
-    // Contains the size of the previous free block, if 0 then the memory preceding us is an
-    // allocation.
-    size_t prev_free_;
-
-    // Allocation size of this object, 0 means that the allocation header is free memory.
-    size_t alloc_size_;
-
-    friend class FreeListSpace;
-  };
-
   FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end);
-
+  size_t GetSlotIndexForAddress(uintptr_t address) const {
+    DCHECK(Contains(reinterpret_cast<mirror::Object*>(address)));
+    return (address - reinterpret_cast<uintptr_t>(Begin())) / kAlignment;
+  }
+  size_t GetSlotIndexForAllocationInfo(const AllocationInfo* info) const;
+  AllocationInfo* GetAllocationInfoForAddress(uintptr_t address);
+  const AllocationInfo* GetAllocationInfoForAddress(uintptr_t address) const;
+  uintptr_t GetAllocationAddressForSlot(size_t slot) const {
+    return reinterpret_cast<uintptr_t>(Begin()) + slot * kAlignment;
+  }
+  uintptr_t GetAddressForAllocationInfo(const AllocationInfo* info) const {
+    return GetAllocationAddressForSlot(GetSlotIndexForAllocationInfo(info));
+  }
   // Removes header from the free blocks set by finding the corresponding iterator and erasing it.
-  void RemoveFreePrev(AllocationHeader* header) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  void RemoveFreePrev(AllocationInfo* info) EXCLUSIVE_LOCKS_REQUIRED(lock_);
 
-  // Finds the allocation header corresponding to obj.
-  AllocationHeader* GetAllocationHeader(const mirror::Object* obj);
-
-  typedef std::set<AllocationHeader*, AllocationHeader::SortByPrevFree,
-                   TrackingAllocator<AllocationHeader*, kAllocatorTagLOSFreeList>> FreeBlocks;
+  class SortByPrevFree {
+   public:
+    bool operator()(const AllocationInfo* a, const AllocationInfo* b) const;
+  };
+  typedef std::set<AllocationInfo*, SortByPrevFree,
+                   TrackingAllocator<AllocationInfo*, kAllocatorTagLOSFreeList>> FreeBlocks;
 
   // There is not footer for any allocations at the end of the space, so we keep track of how much
   // free space there is at the end manually.
   std::unique_ptr<MemMap> mem_map_;
+  // Side table for allocation info, one entry per kAlignment page.
+  std::unique_ptr<MemMap> allocation_info_map_;
+  AllocationInfo* allocation_info_;
+
   Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  // Free bytes at the end of the space.
   size_t free_end_ GUARDED_BY(lock_);
   FreeBlocks free_blocks_ GUARDED_BY(lock_);
 };
diff --git a/runtime/gc/space/large_object_space_test.cc b/runtime/gc/space/large_object_space_test.cc
index f733584..c5d8abc 100644
--- a/runtime/gc/space/large_object_space_test.cc
+++ b/runtime/gc/space/large_object_space_test.cc
@@ -80,6 +80,8 @@
         ASSERT_GE(los->Free(Thread::Current(), obj), request_size);
       }
     }
+    // Test that dump doesn't crash.
+    los->Dump(LOG(INFO));
 
     size_t bytes_allocated = 0;
     // Checks that the coalescing works.