Compacting collector.
The compacting collector is currently similar to semispace: it works by
copying objects back and forth between two bump pointer spaces. Some
types of objects are "non-movable" due to current runtime limitations:
Classes, Methods, and Fields.
Bump pointer spaces are a new type of continuous alloc space with no
lock in the allocation code path; allocation advances the end pointer
with atomic operations. Traversing the objects in a bump pointer space
relies on Object::SizeOf matching the allocated size exactly.
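
As an illustration, walking the space amounts to the sketch below
(Walk and the visitor type are illustrative names, not the space's
actual API; BumpPointerSpace::GetNextObject in the diff performs the
per-object step):

  // Visit every object in [begin, end), assuming Object::SizeOf returns
  // exactly the number of bytes that were allocated for the object.
  void Walk(byte* begin, byte* end, void (*visit)(mirror::Object*)) {
    byte* pos = begin;
    while (pos < end) {
      mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
      visit(obj);
      // Advance by the exact allocated size, keeping 8-byte alignment.
      pos = reinterpret_cast<byte*>(RoundUp(
          reinterpret_cast<uintptr_t>(pos) + obj->SizeOf(), 8));
    }
  }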
Runtime changes:
JNI::GetArrayElements now returns copies of objects if you attempt to
get the backing data of a movable array. For GetArrayElementsCritical,
we return direct backing storage for any type of array, but temporarily
disable the GC until the critical region is completed.
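
From the caller's side this is ordinary JNI discipline; a minimal
sketch (standard JNI usage, not code from this change):

  void SumExample(JNIEnv* env, jintArray array) {
    jboolean is_copy;
    // May now hand back a copy whenever the array lives in a movable space.
    jint* data = env->GetIntArrayElements(array, &is_copy);
    // ... read or write data ...
    // Mode 0 writes changes back to the array (if copied) and frees the copy.
    env->ReleaseIntArrayElements(array, data, 0);
    // Critical accessors return direct storage; the GC is held off until
    // the matching release, so keep the critical region short.
    void* direct = env->GetPrimitiveArrayCritical(array, &is_copy);
    env->ReleasePrimitiveArrayCritical(array, direct, 0);
  }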
Added a new runtime call, VisitObjects, which is used in place of the
old pattern of flushing the allocation stack and walking the bitmaps.
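
A minimal sketch of a caller, assuming a (callback, arg) style
signature on Heap:

  // Counts every object in the heap; the exact VisitObjects signature
  // is an assumption here.
  static void CountCallback(mirror::Object* obj, void* arg) {
    ++*reinterpret_cast<size_t*>(arg);
  }

  static size_t CountAllObjects() {
    size_t count = 0;
    Runtime::Current()->GetHeap()->VisitObjects(CountCallback, &count);
    return count;
  }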
Changed the image writer to be compaction safe and to use the object
monitor word for forwarding addresses.
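
Conceptually (a hedged sketch, independent of the image writer's actual
code): every object header carries a monitor/lock word that is unused
while the image is written, so it can temporarily hold the forwarding
address:

  #include <cstdint>

  struct ObjectHeader {
    uint32_t klass;    // pointer to the object's Class
    uint32_t monitor;  // lock word; doubles as forwarding address here
  };

  inline void SetForwardingAddress(ObjectHeader* obj, uint32_t new_addr) {
    obj->monitor = new_addr;  // stash where the object will move to
  }

  inline uint32_t GetForwardingAddress(const ObjectHeader* obj) {
    return obj->monitor;  // read back during reference fixup
  }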
Added a number of SIRTs to ClassLinker, MethodLinker, etc.
TODO: Enable switching allocators, compacting in the background, etc.
Bug: 8981901
Change-Id: I3c886fd322a6eef2b99388d19a765042ec26ab99
diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h
new file mode 100644
index 0000000..85ef2f4
--- /dev/null
+++ b/runtime/gc/space/bump_pointer_space-inl.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_INL_H_
+#define ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_INL_H_
+
+#include "bump_pointer_space.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+inline mirror::Object* BumpPointerSpace::AllocNonvirtual(size_t num_bytes) {
+ num_bytes = RoundUp(num_bytes, kAlignment);
+ byte* old_end;
+ byte* new_end;
+ do {
+ old_end = end_;
+ new_end = old_end + num_bytes;
+ // If there is no more room in the region, we are out of memory.
+ if (UNLIKELY(new_end > growth_end_)) {
+ return nullptr;
+ }
+ // TODO: Use a CAS whose operand width always matches the pointer size.
+ } while (android_atomic_cas(reinterpret_cast<int32_t>(old_end),
+ reinterpret_cast<int32_t>(new_end),
+ reinterpret_cast<volatile int32_t*>(&end_)) != 0);
+ // TODO: Fewer statistics?
+ total_bytes_allocated_.fetch_add(num_bytes);
+ num_objects_allocated_.fetch_add(1);
+ total_objects_allocated_.fetch_add(1);
+ return reinterpret_cast<mirror::Object*>(old_end);
+}
+
+} // namespace space
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_INL_H_
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
new file mode 100644
index 0000000..06ba57e
--- /dev/null
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "bump_pointer_space.h"
+#include "bump_pointer_space-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/class-inl.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capacity,
+ byte* requested_begin) {
+ capacity = RoundUp(capacity, kPageSize);
+ std::string error_msg;
+ UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
+ PROT_READ | PROT_WRITE, &error_msg));
+ if (mem_map.get() == nullptr) {
+ LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
+ << PrettySize(capacity) << " with message " << error_msg;
+ return nullptr;
+ }
+ return new BumpPointerSpace(name, mem_map.release());
+}
+
+BumpPointerSpace::BumpPointerSpace(const std::string& name, byte* begin, byte* limit)
+ : ContinuousMemMapAllocSpace(name, nullptr, begin, begin, limit,
+ kGcRetentionPolicyAlwaysCollect),
+ num_objects_allocated_(0), total_bytes_allocated_(0), total_objects_allocated_(0),
+ growth_end_(limit) {
+}
+
+BumpPointerSpace::BumpPointerSpace(const std::string& name, MemMap* mem_map)
+ : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->Begin(), mem_map->End(),
+ kGcRetentionPolicyAlwaysCollect),
+ num_objects_allocated_(0), total_bytes_allocated_(0), total_objects_allocated_(0),
+ growth_end_(mem_map->End()) {
+}
+
+mirror::Object* BumpPointerSpace::Alloc(Thread*, size_t num_bytes, size_t* bytes_allocated) {
+ mirror::Object* ret = AllocNonvirtual(num_bytes);
+ if (LIKELY(ret != nullptr)) {
+ *bytes_allocated = num_bytes;
+ }
+ return ret;
+}
+
+size_t BumpPointerSpace::AllocationSize(const mirror::Object* obj) {
+ return AllocationSizeNonvirtual(obj);
+}
+
+void BumpPointerSpace::Clear() {
+ // Release the pages back to the operating system.
+ CHECK_NE(madvise(Begin(), Limit() - Begin(), MADV_DONTNEED), -1) << "madvise failed";
+ // Reset the end of the space back to the beginning; we move the end forward as we allocate
+ // objects.
+ SetEnd(Begin());
+ growth_end_ = Limit();
+ num_objects_allocated_ = 0;
+}
+
+void BumpPointerSpace::Dump(std::ostream& os) const {
+ os << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(End()) << " - "
+ << reinterpret_cast<void*>(Limit());
+}
+
+mirror::Object* BumpPointerSpace::GetNextObject(mirror::Object* obj) {
+ const uintptr_t position = reinterpret_cast<uintptr_t>(obj) + obj->SizeOf();
+ return reinterpret_cast<mirror::Object*>(RoundUp(position, kAlignment));
+}
+
+} // namespace space
+} // namespace gc
+} // namespace art
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
new file mode 100644
index 0000000..0faac0c
--- /dev/null
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_
+#define ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_
+
+#include "space.h"
+
+namespace art {
+namespace gc {
+
+namespace collector {
+ class MarkSweep;
+} // namespace collector
+
+namespace space {
+
+// A bump pointer space is a space where objects may be allocated and garbage collected.
+class BumpPointerSpace : public ContinuousMemMapAllocSpace {
+ public:
+ typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
+
+ SpaceType GetType() const {
+ return kSpaceTypeBumpPointerSpace;
+ }
+
+ // Create a bump pointer space with the requested sizes. The requested base address is not
+ // guaranteed to be granted; if it is required, the caller should call Begin on the returned
+ // space to confirm the request was granted.
+ static BumpPointerSpace* Create(const std::string& name, size_t capacity, byte* requested_begin);
+
+ // Allocate num_bytes, returns nullptr if the space is full.
+ virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated);
+ mirror::Object* AllocNonvirtual(size_t num_bytes);
+
+ // Return the storage space required by obj.
+ virtual size_t AllocationSize(const mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // NOPs unless we support free lists.
+ virtual size_t Free(Thread*, mirror::Object*) {
+ return 0;
+ }
+ virtual size_t FreeList(Thread*, size_t, mirror::Object**) {
+ return 0;
+ }
+
+ size_t AllocationSizeNonvirtual(const mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return obj->SizeOf();
+ }
+
+ // Removes the fork time growth limit on capacity, allowing the application to allocate up to the
+ // maximum reserved size of the heap.
+ void ClearGrowthLimit() {
+ growth_end_ = Limit();
+ }
+
+ // Override capacity so that we only return the possibly limited capacity.
+ size_t Capacity() const {
+ return growth_end_ - begin_;
+ }
+
+ // The total amount of memory reserved for the space.
+ size_t NonGrowthLimitCapacity() const {
+ return GetMemMap()->Size();
+ }
+
+ accounting::SpaceBitmap* GetLiveBitmap() const {
+ return nullptr;
+ }
+
+ accounting::SpaceBitmap* GetMarkBitmap() const {
+ return nullptr;
+ }
+
+ // Clear the memory and reset the pointer to the start of the space.
+ void Clear();
+
+ void Dump(std::ostream& os) const;
+
+ uint64_t GetBytesAllocated() {
+ return Size();
+ }
+
+ uint64_t GetObjectsAllocated() {
+ return num_objects_allocated_;
+ }
+
+ uint64_t GetTotalBytesAllocated() {
+ return total_bytes_allocated_;
+ }
+
+ uint64_t GetTotalObjectsAllocated() {
+ return total_objects_allocated_;
+ }
+
+ bool Contains(const mirror::Object* obj) const {
+ const byte* byte_obj = reinterpret_cast<const byte*>(obj);
+ return byte_obj >= Begin() && byte_obj < End();
+ }
+
+ // TODO: Change this? Mainly used for compacting to a particular region of memory.
+ BumpPointerSpace(const std::string& name, byte* begin, byte* limit);
+
+ // Return the object which comes after obj, while ensuring alignment.
+ static mirror::Object* GetNextObject(mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ protected:
+ BumpPointerSpace(const std::string& name, MemMap* mem_map);
+
+ size_t InternalAllocationSize(const mirror::Object* obj);
+ mirror::Object* AllocWithoutGrowthLocked(size_t num_bytes, size_t* bytes_allocated)
+ EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+ // Approximate allocation statistics: objects and bytes allocated into the space.
+ AtomicInteger num_objects_allocated_;
+ AtomicInteger total_bytes_allocated_;
+ AtomicInteger total_objects_allocated_;
+
+ // Alignment.
+ static constexpr size_t kAlignment = 8;
+
+ byte* growth_end_;
+
+ private:
+ friend class collector::MarkSweep;
+ DISALLOW_COPY_AND_ASSIGN(BumpPointerSpace);
+};
+
+} // namespace space
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 9ebc16a..8a5e33a 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -102,8 +102,8 @@
}
ValgrindDlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin,
- byte* end, size_t growth_limit, size_t initial_size) :
- DlMallocSpace(name, mem_map, mspace, begin, end, growth_limit) {
+ byte* end, byte* limit, size_t growth_limit, size_t initial_size) :
+ DlMallocSpace(name, mem_map, mspace, begin, end, limit, growth_limit) {
VALGRIND_MAKE_MEM_UNDEFINED(mem_map->Begin() + initial_size, mem_map->Size() - initial_size);
}
@@ -117,15 +117,13 @@
size_t DlMallocSpace::bitmap_index_ = 0;
DlMallocSpace::DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin,
- byte* end, size_t growth_limit)
- : MemMapSpace(name, mem_map, end - begin, kGcRetentionPolicyAlwaysCollect),
+ byte* end, byte* limit, size_t growth_limit)
+ : ContinuousMemMapAllocSpace(name, mem_map, begin, end, limit, kGcRetentionPolicyAlwaysCollect),
recent_free_pos_(0), total_bytes_freed_(0), total_objects_freed_(0),
lock_("allocation space lock", kAllocSpaceLock), mspace_(mspace),
growth_limit_(growth_limit) {
CHECK(mspace != NULL);
-
size_t bitmap_index = bitmap_index_++;
-
static const uintptr_t kGcCardSize = static_cast<uintptr_t>(accounting::CardTable::kCardSize);
CHECK(IsAligned<kGcCardSize>(reinterpret_cast<uintptr_t>(mem_map->Begin())));
CHECK(IsAligned<kGcCardSize>(reinterpret_cast<uintptr_t>(mem_map->End())));
@@ -133,12 +131,10 @@
StringPrintf("allocspace %s live-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
Begin(), Capacity()));
DCHECK(live_bitmap_.get() != NULL) << "could not create allocspace live bitmap #" << bitmap_index;
-
mark_bitmap_.reset(accounting::SpaceBitmap::Create(
StringPrintf("allocspace %s mark-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
Begin(), Capacity()));
DCHECK(live_bitmap_.get() != NULL) << "could not create allocspace mark bitmap #" << bitmap_index;
-
for (auto& freed : recent_freed_objects_) {
freed.first = nullptr;
freed.second = nullptr;
@@ -207,12 +203,14 @@
// Everything is set so record in immutable structure and leave
MemMap* mem_map_ptr = mem_map.release();
DlMallocSpace* space;
+ byte* begin = mem_map_ptr->Begin();
if (RUNNING_ON_VALGRIND > 0) {
- space = new ValgrindDlMallocSpace(name, mem_map_ptr, mspace, mem_map_ptr->Begin(), end,
+ space = new ValgrindDlMallocSpace(name, mem_map_ptr, mspace, begin, end, begin + capacity,
growth_limit, initial_size);
} else {
- space = new DlMallocSpace(name, mem_map_ptr, mspace, mem_map_ptr->Begin(), end, growth_limit);
+ space = new DlMallocSpace(name, mem_map_ptr, mspace, begin, end, begin + capacity, growth_limit);
}
+ // We start out with only the initial size possibly containing objects.
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Space::CreateAllocSpace exiting (" << PrettyDuration(NanoTime() - start_time)
<< " ) " << *space;
@@ -318,7 +316,8 @@
CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), alloc_space_name);
}
DlMallocSpace* alloc_space =
- new DlMallocSpace(alloc_space_name, mem_map.release(), mspace, end_, end, growth_limit);
+ new DlMallocSpace(alloc_space_name, mem_map.release(), mspace, end_, end, limit_,
+ growth_limit);
live_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
CHECK_EQ(live_bitmap_->HeapLimit(), reinterpret_cast<uintptr_t>(End()));
mark_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
@@ -343,8 +342,7 @@
}
void DlMallocSpace::RegisterRecentFree(mirror::Object* ptr) {
- recent_freed_objects_[recent_free_pos_].first = ptr;
- recent_freed_objects_[recent_free_pos_].second = ptr->GetClass();
+ recent_freed_objects_[recent_free_pos_] = std::make_pair(ptr, ptr->GetClass());
recent_free_pos_ = (recent_free_pos_ + 1) & kRecentFreeMask;
}
@@ -412,8 +410,8 @@
// Callback from dlmalloc when it needs to increase the footprint
extern "C" void* art_heap_morecore(void* mspace, intptr_t increment) {
Heap* heap = Runtime::Current()->GetHeap();
- DCHECK_EQ(heap->GetAllocSpace()->GetMspace(), mspace);
- return heap->GetAllocSpace()->MoreCore(increment);
+ DCHECK_EQ(heap->GetNonMovingSpace()->GetMspace(), mspace);
+ return heap->GetNonMovingSpace()->MoreCore(increment);
}
void* DlMallocSpace::MoreCore(intptr_t increment) {
@@ -482,6 +480,29 @@
return mspace_footprint_limit(mspace_);
}
+// Returns the old mark bitmap.
+accounting::SpaceBitmap* DlMallocSpace::BindLiveToMarkBitmap() {
+ accounting::SpaceBitmap* live_bitmap = GetLiveBitmap();
+ accounting::SpaceBitmap* mark_bitmap = mark_bitmap_.release();
+ temp_bitmap_.reset(mark_bitmap);
+ mark_bitmap_.reset(live_bitmap);
+ return mark_bitmap;
+}
+
+bool DlMallocSpace::HasBoundBitmaps() const {
+ return temp_bitmap_.get() != nullptr;
+}
+
+void DlMallocSpace::UnBindBitmaps() {
+ CHECK(HasBoundBitmaps());
+ // At this point, the temp_bitmap holds our old mark bitmap.
+ accounting::SpaceBitmap* new_bitmap = temp_bitmap_.release();
+ CHECK_EQ(mark_bitmap_.release(), live_bitmap_.get());
+ mark_bitmap_.reset(new_bitmap);
+ DCHECK(temp_bitmap_.get() == NULL);
+}
+
void DlMallocSpace::SetFootprintLimit(size_t new_size) {
MutexLock mu(Thread::Current(), lock_);
VLOG(heap) << "DLMallocSpace::SetFootprintLimit " << PrettySize(new_size);
@@ -504,17 +525,25 @@
}
uint64_t DlMallocSpace::GetBytesAllocated() {
- MutexLock mu(Thread::Current(), lock_);
- size_t bytes_allocated = 0;
- mspace_inspect_all(mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
- return bytes_allocated;
+ if (mspace_ != nullptr) {
+ MutexLock mu(Thread::Current(), lock_);
+ size_t bytes_allocated = 0;
+ mspace_inspect_all(mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
+ return bytes_allocated;
+ } else {
+ return Size();
+ }
}
uint64_t DlMallocSpace::GetObjectsAllocated() {
- MutexLock mu(Thread::Current(), lock_);
- size_t objects_allocated = 0;
- mspace_inspect_all(mspace_, DlmallocObjectsAllocatedCallback, &objects_allocated);
- return objects_allocated;
+ if (mspace_ != nullptr) {
+ MutexLock mu(Thread::Current(), lock_);
+ size_t objects_allocated = 0;
+ mspace_inspect_all(mspace_, DlmallocObjectsAllocatedCallback, &objects_allocated);
+ return objects_allocated;
+ } else {
+ return 0;
+ }
}
} // namespace space
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 522535e..59dafe3 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -30,7 +30,7 @@
namespace space {
// An alloc space is a space where objects may be allocated and garbage collected.
-class DlMallocSpace : public MemMapSpace, public AllocSpace {
+class DlMallocSpace : public ContinuousMemMapAllocSpace {
public:
typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
@@ -136,19 +136,30 @@
return GetObjectsAllocated() + total_objects_freed_;
}
+ // Returns the old mark bitmap.
+ accounting::SpaceBitmap* BindLiveToMarkBitmap();
+ bool HasBoundBitmaps() const;
+ void UnBindBitmaps();
+
// Returns the class of a recently freed object.
mirror::Class* FindRecentFreedObject(const mirror::Object* obj);
+ // Used to ensure that failure happens when you free / allocate into an invalidated space. If we
+ // don't do this we may get heap corruption instead of a segfault at null.
+ void InvalidateMSpace() {
+ mspace_ = nullptr;
+ }
+
protected:
DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin, byte* end,
- size_t growth_limit);
+ byte* limit, size_t growth_limit);
private:
size_t InternalAllocationSize(const mirror::Object* obj);
mirror::Object* AllocWithoutGrowthLocked(size_t num_bytes, size_t* bytes_allocated)
EXCLUSIVE_LOCKS_REQUIRED(lock_);
bool Init(size_t initial_size, size_t maximum_size, size_t growth_size, byte* requested_base);
- void RegisterRecentFree(mirror::Object* ptr);
+ void RegisterRecentFree(mirror::Object* ptr) EXCLUSIVE_LOCKS_REQUIRED(lock_);
static void* CreateMallocSpace(void* base, size_t morecore_start, size_t initial_size);
UniquePtr<accounting::SpaceBitmap> live_bitmap_;
@@ -174,7 +185,7 @@
Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
// Underlying malloc space
- void* const mspace_;
+ void* mspace_;
// The capacity of the alloc space until such time that ClearGrowthLimit is called.
// The underlying mem_map_ controls the maximum size we allow the heap to grow to. The growth
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index e12ee06..c6177bd 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -39,8 +39,9 @@
ImageSpace::ImageSpace(const std::string& name, MemMap* mem_map,
accounting::SpaceBitmap* live_bitmap)
- : MemMapSpace(name, mem_map, mem_map->Size(), kGcRetentionPolicyNeverCollect) {
- DCHECK(live_bitmap != NULL);
+ : MemMapSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
+ kGcRetentionPolicyNeverCollect) {
+ DCHECK(live_bitmap != nullptr);
live_bitmap_.reset(live_bitmap);
}
@@ -332,7 +333,7 @@
void ImageSpace::Dump(std::ostream& os) const {
os << GetType()
- << "begin=" << reinterpret_cast<void*>(Begin())
+ << " begin=" << reinterpret_cast<void*>(Begin())
<< ",end=" << reinterpret_cast<void*>(End())
<< ",size=" << PrettySize(Size())
<< ",name=\"" << GetName() << "\"]";
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index ef889d4..07fb288 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -59,6 +59,14 @@
size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs);
+ virtual bool IsAllocSpace() const {
+ return true;
+ }
+
+ virtual AllocSpace* AsAllocSpace() {
+ return this;
+ }
+
protected:
explicit LargeObjectSpace(const std::string& name);
diff --git a/runtime/gc/space/space-inl.h b/runtime/gc/space/space-inl.h
index 2c3b93c..f1031ff 100644
--- a/runtime/gc/space/space-inl.h
+++ b/runtime/gc/space/space-inl.h
@@ -27,18 +27,28 @@
namespace space {
inline ImageSpace* Space::AsImageSpace() {
- DCHECK_EQ(GetType(), kSpaceTypeImageSpace);
+ DCHECK(IsImageSpace());
return down_cast<ImageSpace*>(down_cast<MemMapSpace*>(this));
}
inline DlMallocSpace* Space::AsDlMallocSpace() {
- DCHECK(GetType() == kSpaceTypeAllocSpace || GetType() == kSpaceTypeZygoteSpace);
+ DCHECK(IsDlMallocSpace());
return down_cast<DlMallocSpace*>(down_cast<MemMapSpace*>(this));
}
inline LargeObjectSpace* Space::AsLargeObjectSpace() {
- DCHECK_EQ(GetType(), kSpaceTypeLargeObjectSpace);
- return reinterpret_cast<LargeObjectSpace*>(this);
+ DCHECK(IsLargeObjectSpace());
+ return down_cast<LargeObjectSpace*>(this);
+}
+
+inline ContinuousSpace* Space::AsContinuousSpace() {
+ DCHECK(IsContinuousSpace());
+ return down_cast<ContinuousSpace*>(this);
+}
+
+inline DiscontinuousSpace* Space::AsDiscontinuousSpace() {
+ DCHECK(IsDiscontinuousSpace());
+ return down_cast<DiscontinuousSpace*>(this);
}
} // namespace space
diff --git a/runtime/gc/space/space.cc b/runtime/gc/space/space.cc
index de48b74..8eb17e0 100644
--- a/runtime/gc/space/space.cc
+++ b/runtime/gc/space/space.cc
@@ -34,7 +34,6 @@
return os;
}
-
DiscontinuousSpace::DiscontinuousSpace(const std::string& name,
GcRetentionPolicy gc_retention_policy) :
Space(name, gc_retention_policy),
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 6dd7952..4c05dde 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -42,7 +42,10 @@
namespace space {
+class AllocSpace;
+class ContinuousSpace;
class DlMallocSpace;
+class DiscontinuousSpace;
class ImageSpace;
class LargeObjectSpace;
@@ -64,6 +67,7 @@
kSpaceTypeImageSpace,
kSpaceTypeAllocSpace,
kSpaceTypeZygoteSpace,
+ kSpaceTypeBumpPointerSpace,
kSpaceTypeLargeObjectSpace,
};
std::ostream& operator<<(std::ostream& os, const SpaceType& space_type);
@@ -113,12 +117,35 @@
return GetType() == kSpaceTypeZygoteSpace;
}
+ // Is this space a bump pointer space?
+ bool IsBumpPointerSpace() const {
+ return GetType() == kSpaceTypeBumpPointerSpace;
+ }
+
// Does this space hold large objects and implement the large object space abstraction?
bool IsLargeObjectSpace() const {
return GetType() == kSpaceTypeLargeObjectSpace;
}
LargeObjectSpace* AsLargeObjectSpace();
+ virtual bool IsContinuousSpace() const {
+ return false;
+ }
+ ContinuousSpace* AsContinuousSpace();
+
+ virtual bool IsDiscontinuousSpace() const {
+ return false;
+ }
+ DiscontinuousSpace* AsDiscontinuousSpace();
+
+ virtual bool IsAllocSpace() const {
+ return false;
+ }
+ virtual AllocSpace* AsAllocSpace() {
+ LOG(FATAL) << "Unimplemented";
+ return nullptr;
+ }
+
virtual ~Space() {}
protected:
@@ -131,13 +158,13 @@
// Name of the space that may vary due to the Zygote fork.
std::string name_;
- private:
+ protected:
// When should objects within this space be reclaimed? Not constant as we vary it in the case
// of Zygote forking.
GcRetentionPolicy gc_retention_policy_;
+ private:
friend class art::gc::Heap;
-
DISALLOW_COPY_AND_ASSIGN(Space);
};
std::ostream& operator<<(std::ostream& os, const Space& space);
@@ -180,16 +207,31 @@
// continuous spaces can be marked in the card table.
class ContinuousSpace : public Space {
public:
- // Address at which the space begins
+ // Address at which the space begins.
byte* Begin() const {
return begin_;
}
- // Address at which the space ends, which may vary as the space is filled.
+ // Current address at which the space ends, which may vary as the space is filled.
byte* End() const {
return end_;
}
+ // The end of the address range covered by the space.
+ byte* Limit() const {
+ return limit_;
+ }
+
+ // Change the end of the space. Be careful with use since changing the end of a space to an
+ // invalid value may break the GC.
+ void SetEnd(byte* end) {
+ end_ = end;
+ }
+
+ void SetLimit(byte* limit) {
+ limit_ = limit;
+ }
+
// Current size of space
size_t Size() const {
return End() - Begin();
@@ -198,31 +240,42 @@
virtual accounting::SpaceBitmap* GetLiveBitmap() const = 0;
virtual accounting::SpaceBitmap* GetMarkBitmap() const = 0;
+ // Maximum which the mapped space can grow to.
+ virtual size_t Capacity() const {
+ return Limit() - Begin();
+ }
+
// Is object within this space? We check to see if the pointer is beyond the end first as
// continuous spaces are iterated over from low to high.
bool HasAddress(const mirror::Object* obj) const {
const byte* byte_ptr = reinterpret_cast<const byte*>(obj);
- return byte_ptr < End() && byte_ptr >= Begin();
+ return byte_ptr >= Begin() && byte_ptr < Limit();
}
bool Contains(const mirror::Object* obj) const {
return HasAddress(obj);
}
+ virtual bool IsContinuousSpace() const {
+ return true;
+ }
+
virtual ~ContinuousSpace() {}
protected:
ContinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy,
- byte* begin, byte* end) :
- Space(name, gc_retention_policy), begin_(begin), end_(end) {
+ byte* begin, byte* end, byte* limit) :
+ Space(name, gc_retention_policy), begin_(begin), end_(end), limit_(limit) {
}
-
// The beginning of the storage for fast access.
- byte* const begin_;
+ byte* begin_;
// Current end of the space.
- byte* end_;
+ byte* volatile end_;
+
+ // Limit of the space.
+ byte* limit_;
private:
DISALLOW_COPY_AND_ASSIGN(ContinuousSpace);
@@ -241,6 +294,10 @@
return mark_objects_.get();
}
+ virtual bool IsDiscontinuousSpace() const {
+ return true;
+ }
+
virtual ~DiscontinuousSpace() {}
protected:
@@ -255,25 +312,12 @@
class MemMapSpace : public ContinuousSpace {
public:
- // Maximum which the mapped space can grow to.
- virtual size_t Capacity() const {
- return mem_map_->Size();
- }
-
// Size of the space without a limit on its growth. By default this is just the Capacity, but
// for the allocation space we support starting with a small heap and then extending it.
virtual size_t NonGrowthLimitCapacity() const {
return Capacity();
}
- protected:
- MemMapSpace(const std::string& name, MemMap* mem_map, size_t initial_size,
- GcRetentionPolicy gc_retention_policy)
- : ContinuousSpace(name, gc_retention_policy,
- mem_map->Begin(), mem_map->Begin() + initial_size),
- mem_map_(mem_map) {
- }
-
MemMap* GetMemMap() {
return mem_map_.get();
}
@@ -282,13 +326,45 @@
return mem_map_.get();
}
- private:
+ protected:
+ MemMapSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end, byte* limit,
+ GcRetentionPolicy gc_retention_policy)
+ : ContinuousSpace(name, gc_retention_policy, begin, end, limit),
+ mem_map_(mem_map) {
+ }
+
// Underlying storage of the space
UniquePtr<MemMap> mem_map_;
+ private:
DISALLOW_COPY_AND_ASSIGN(MemMapSpace);
};
+// Used by the heap compaction interface to enable copying from one type of alloc space to another.
+class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
+ public:
+ virtual bool IsAllocSpace() const {
+ return true;
+ }
+
+ virtual AllocSpace* AsAllocSpace() {
+ return this;
+ }
+
+ virtual void Clear() {
+ LOG(FATAL) << "Unimplemented";
+ }
+
+ protected:
+ ContinuousMemMapAllocSpace(const std::string& name, MemMap* mem_map, byte* begin,
+ byte* end, byte* limit, GcRetentionPolicy gc_retention_policy)
+ : MemMapSpace(name, mem_map, begin, end, limit, gc_retention_policy) {
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ContinuousMemMapAllocSpace);
+};
+
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/space_test.cc b/runtime/gc/space/space_test.cc
index 455168c..383714b 100644
--- a/runtime/gc/space/space_test.cc
+++ b/runtime/gc/space/space_test.cc
@@ -33,8 +33,8 @@
int round, size_t growth_limit);
void SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size);
- void AddContinuousSpace(ContinuousSpace* space) {
- Runtime::Current()->GetHeap()->AddContinuousSpace(space);
+ void AddSpace(ContinuousSpace* space) {
+ Runtime::Current()->GetHeap()->AddSpace(space);
}
};
@@ -91,7 +91,7 @@
ASSERT_TRUE(space != NULL);
// Make space findable to the heap, will also delete space when runtime is cleaned up
- AddContinuousSpace(space);
+ AddSpace(space);
Thread* self = Thread::Current();
// Succeeds, fits without adjusting the footprint limit.
@@ -136,7 +136,7 @@
space = space->CreateZygoteSpace("alloc space");
// Make space findable to the heap, will also delete space when runtime is cleaned up
- AddContinuousSpace(space);
+ AddSpace(space);
// Succeeds, fits without adjusting the footprint limit.
ptr1 = space->Alloc(self, 1 * MB, &dummy);
@@ -164,7 +164,7 @@
Thread* self = Thread::Current();
// Make space findable to the heap, will also delete space when runtime is cleaned up
- AddContinuousSpace(space);
+ AddSpace(space);
// Succeeds, fits without adjusting the footprint limit.
mirror::Object* ptr1 = space->Alloc(self, 1 * MB, &dummy);
@@ -270,7 +270,7 @@
ASSERT_TRUE(space != NULL);
// Make space findable to the heap, will also delete space when runtime is cleaned up
- AddContinuousSpace(space);
+ AddSpace(space);
Thread* self = Thread::Current();
// Succeeds, fits without adjusting the max allowed footprint.
@@ -467,7 +467,7 @@
EXPECT_EQ(space->NonGrowthLimitCapacity(), capacity);
// Make space findable to the heap, will also delete space when runtime is cleaned up
- AddContinuousSpace(space);
+ AddSpace(space);
// In this round we don't allocate with growth and therefore can't grow past the initial size.
// This effectively makes the growth_limit the initial_size, so assert this.