More of the concurrent copying collector.
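
Add RegionSpace, a moving space made up of equal-sized (1 MB) regions,
for use by the concurrent copying collector. Allocation bumps a pointer
within the current region with a CAS; allocations larger than a region
claim a run of contiguous regions, and a region can also be handed out
as a thread-local allocation buffer. SetFromSpace() and ClearFromSpace()
flip regions between the from-space, unevacuated from-space and
to-space states around a collection. Also give BumpPointerSpace's block
lock a lock level and avoid read barriers when walking objects whose
class may not be set yet.
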
Bug: 12687968
Change-Id: I62f70274d47df6d6cab714df95c518b750ce3105
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 04b09e9..9675ba6 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -57,7 +57,7 @@
kGcRetentionPolicyAlwaysCollect),
growth_end_(mem_map->End()),
objects_allocated_(0), bytes_allocated_(0),
- block_lock_("Block lock"),
+ block_lock_("Block lock", kBumpPointerSpaceBlockLock),
main_block_size_(0),
num_blocks_(0) {
}
@@ -172,7 +172,8 @@
// Walk all of the objects in the main block first.
while (pos < main_end) {
mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
- if (obj->GetClass() == nullptr) {
+ // No read barrier because obj may not be a valid object.
+ if (obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() == nullptr) {
// There is a race condition where a thread has just allocated an object but not set the
// class. We can't know the size of this object, so we don't visit it and exit the function
// since there is guaranteed to be not other blocks.
@@ -192,7 +193,8 @@
CHECK_LE(reinterpret_cast<const uint8_t*>(end_obj), End());
// We don't know how many objects are allocated in the current block. When we hit a null class
// assume its the end. TODO: Have a thread update the header when it flushes the block?
- while (obj < end_obj && obj->GetClass() != nullptr) {
+ // No read barrier because obj may not be a valid object.
+ while (obj < end_obj && obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
callback(obj, arg);
obj = GetNextObject(obj);
}
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
new file mode 100644
index 0000000..fd00739
--- /dev/null
+++ b/runtime/gc/space/region_space-inl.h
@@ -0,0 +1,322 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_
+#define ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_
+
+#include "region_space.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+inline mirror::Object* RegionSpace::Alloc(Thread*, size_t num_bytes, size_t* bytes_allocated,
+ size_t* usable_size) {
+ num_bytes = RoundUp(num_bytes, kAlignment);
+ return AllocNonvirtual<false>(num_bytes, bytes_allocated, usable_size);
+}
+
+inline mirror::Object* RegionSpace::AllocThreadUnsafe(Thread* self, size_t num_bytes,
+ size_t* bytes_allocated,
+ size_t* usable_size) {
+ Locks::mutator_lock_->AssertExclusiveHeld(self);
+ return Alloc(self, num_bytes, bytes_allocated, usable_size);
+}
+
+template<bool kForEvac>
+inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
+ size_t* usable_size) {
+ DCHECK(IsAligned<kAlignment>(num_bytes));
+ mirror::Object* obj;
+ if (LIKELY(num_bytes <= kRegionSize)) {
+ // Non-large object.
+ if (!kForEvac) {
+ obj = current_region_->Alloc(num_bytes, bytes_allocated, usable_size);
+ } else {
+ DCHECK(evac_region_ != nullptr);
+ obj = evac_region_->Alloc(num_bytes, bytes_allocated, usable_size);
+ }
+ if (LIKELY(obj != nullptr)) {
+ return obj;
+ }
+ MutexLock mu(Thread::Current(), region_lock_);
+ // Retry with current region since another thread may have updated it.
+ if (!kForEvac) {
+ obj = current_region_->Alloc(num_bytes, bytes_allocated, usable_size);
+ } else {
+ obj = evac_region_->Alloc(num_bytes, bytes_allocated, usable_size);
+ }
+ if (LIKELY(obj != nullptr)) {
+ return obj;
+ }
+ if (!kForEvac) {
+ // Retain sufficient free regions for full evacuation.
+ if ((num_non_free_regions_ + 1) * 2 > num_regions_) {
+ return nullptr;
+ }
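+      // Claim a free region as the new current allocation region and allocate from it.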
+ for (size_t i = 0; i < num_regions_; ++i) {
+        Region* r = &regions_[i];
+ if (r->IsFree()) {
+ r->Unfree(time_);
+ r->SetNewlyAllocated();
+ ++num_non_free_regions_;
+ obj = r->Alloc(num_bytes, bytes_allocated, usable_size);
+ CHECK(obj != nullptr);
+ current_region_ = r;
+ return obj;
+ }
+ }
+ } else {
+ for (size_t i = 0; i < num_regions_; ++i) {
+        Region* r = &regions_[i];
+ if (r->IsFree()) {
+ r->Unfree(time_);
+ ++num_non_free_regions_;
+ obj = r->Alloc(num_bytes, bytes_allocated, usable_size);
+ CHECK(obj != nullptr);
+ evac_region_ = r;
+ return obj;
+ }
+ }
+ }
+ } else {
+ // Large object.
+ obj = AllocLarge<kForEvac>(num_bytes, bytes_allocated, usable_size);
+ if (LIKELY(obj != nullptr)) {
+ return obj;
+ }
+ }
+ return nullptr;
+}
+
+inline mirror::Object* RegionSpace::Region::Alloc(size_t num_bytes, size_t* bytes_allocated,
+ size_t* usable_size) {
+ DCHECK_EQ(state_, static_cast<uint8_t>(kRegionToSpace));
+ DCHECK(IsAligned<kAlignment>(num_bytes));
+ Atomic<uint8_t*>* atomic_top = reinterpret_cast<Atomic<uint8_t*>*>(&top_);
+ uint8_t* old_top;
+ uint8_t* new_top;
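+  // Bump top_ with a CAS loop so that multiple threads can allocate from the
+  // same region concurrently without taking region_lock_.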
+ do {
+ old_top = atomic_top->LoadRelaxed();
+ new_top = old_top + num_bytes;
+ if (UNLIKELY(new_top > end_)) {
+ return nullptr;
+ }
+ } while (!atomic_top->CompareExchangeWeakSequentiallyConsistent(old_top, new_top));
+ reinterpret_cast<Atomic<uint64_t>*>(&objects_allocated_)->FetchAndAddSequentiallyConsistent(1);
+ DCHECK_LE(atomic_top->LoadRelaxed(), end_);
+ DCHECK_LT(old_top, end_);
+ DCHECK_LE(new_top, end_);
+ *bytes_allocated = num_bytes;
+ if (usable_size != nullptr) {
+ *usable_size = num_bytes;
+ }
+ return reinterpret_cast<mirror::Object*>(old_top);
+}
+
+inline size_t RegionSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t num_bytes = obj->SizeOf();
+ if (usable_size != nullptr) {
+ if (LIKELY(num_bytes <= kRegionSize)) {
+ DCHECK(RefToRegion(obj)->IsNormal());
+ *usable_size = RoundUp(num_bytes, kAlignment);
+ } else {
+ DCHECK(RefToRegion(obj)->IsLarge());
+ *usable_size = RoundUp(num_bytes, kRegionSize);
+ }
+ }
+ return num_bytes;
+}
+
+template<RegionSpace::SubSpaceType kSubSpaceType>
+uint64_t RegionSpace::GetBytesAllocatedInternal() {
+ uint64_t bytes = 0;
+ MutexLock mu(Thread::Current(), region_lock_);
+ for (size_t i = 0; i < num_regions_; ++i) {
+    Region* r = &regions_[i];
+ if (r->IsFree()) {
+ continue;
+ }
+ switch (kSubSpaceType) {
+ case kAllSpaces:
+ bytes += r->BytesAllocated();
+ break;
+ case kFromSpace:
+ if (r->IsInFromSpace()) {
+ bytes += r->BytesAllocated();
+ }
+ break;
+ case kUnevacFromSpace:
+ if (r->IsInUnevacFromSpace()) {
+ bytes += r->BytesAllocated();
+ }
+ break;
+ case kToSpace:
+ if (r->IsInToSpace()) {
+ bytes += r->BytesAllocated();
+ }
+ break;
+ default:
+ LOG(FATAL) << "Unexpected space type : " << static_cast<int>(kSubSpaceType);
+ }
+ }
+ return bytes;
+}
+
+template<RegionSpace::SubSpaceType kSubSpaceType>
+uint64_t RegionSpace::GetObjectsAllocatedInternal() {
+  uint64_t objects = 0;
+ MutexLock mu(Thread::Current(), region_lock_);
+ for (size_t i = 0; i < num_regions_; ++i) {
+    Region* r = &regions_[i];
+ if (r->IsFree()) {
+ continue;
+ }
+ switch (kSubSpaceType) {
+ case kAllSpaces:
+        objects += r->ObjectsAllocated();
+ break;
+ case kFromSpace:
+ if (r->IsInFromSpace()) {
+          objects += r->ObjectsAllocated();
+ }
+ break;
+ case kUnevacFromSpace:
+ if (r->IsInUnevacFromSpace()) {
+          objects += r->ObjectsAllocated();
+ }
+ break;
+ case kToSpace:
+ if (r->IsInToSpace()) {
+          objects += r->ObjectsAllocated();
+ }
+ break;
+ default:
+ LOG(FATAL) << "Unexpected space type : " << static_cast<int>(kSubSpaceType);
+ }
+ }
+  return objects;
+}
+
+template<bool kToSpaceOnly>
+void RegionSpace::WalkInternal(ObjectCallback* callback, void* arg) {
+ // TODO: MutexLock on region_lock_ won't work due to lock order
+ // issues (the classloader classes lock and the monitor lock). We
+ // call this with threads suspended.
+ Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
+ for (size_t i = 0; i < num_regions_; ++i) {
+    Region* r = &regions_[i];
+ if (r->IsFree() || (kToSpaceOnly && !r->IsInToSpace())) {
+ continue;
+ }
+ if (r->IsLarge()) {
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin());
+ if (obj->GetClass() != nullptr) {
+ callback(obj, arg);
+ }
+ } else if (r->IsLargeTail()) {
+ // Do nothing.
+ } else {
+ uint8_t* pos = r->Begin();
+ uint8_t* top = r->Top();
+ while (pos < top) {
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
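+        // A null class means the rest of the region has not been allocated yet.
+        // No read barrier because obj may not be a valid object.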
+ if (obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
+ callback(obj, arg);
+ pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
+ } else {
+ break;
+ }
+ }
+ }
+ }
+}
+
+inline mirror::Object* RegionSpace::GetNextObject(mirror::Object* obj) {
+ const uintptr_t position = reinterpret_cast<uintptr_t>(obj) + obj->SizeOf();
+ return reinterpret_cast<mirror::Object*>(RoundUp(position, kAlignment));
+}
+
+template<bool kForEvac>
+mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocated,
+ size_t* usable_size) {
+ DCHECK(IsAligned<kAlignment>(num_bytes));
+ DCHECK_GT(num_bytes, kRegionSize);
+ size_t num_regs = RoundUp(num_bytes, kRegionSize) / kRegionSize;
+ DCHECK_GT(num_regs, 0U);
+ DCHECK_LT((num_regs - 1) * kRegionSize, num_bytes);
+ DCHECK_LE(num_bytes, num_regs * kRegionSize);
+ MutexLock mu(Thread::Current(), region_lock_);
+ if (!kForEvac) {
+ // Retain sufficient free regions for full evacuation.
+ if ((num_non_free_regions_ + num_regs) * 2 > num_regions_) {
+ return nullptr;
+ }
+ }
+  // Find a long enough run of contiguous free regions.
+ size_t left = 0;
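+  // Scan for num_regs consecutive free regions, restarting past any non-free region.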
+ while (left + num_regs - 1 < num_regions_) {
+ bool found = true;
+ size_t right = left;
+ DCHECK_LT(right, left + num_regs)
+        << "The inner loop should iterate at least once";
+ while (right < left + num_regs) {
+ if (regions_[right].IsFree()) {
+ ++right;
+ } else {
+ found = false;
+ break;
+ }
+ }
+ if (found) {
+      // right points to one region past the last free region.
+ DCHECK_EQ(left + num_regs, right);
+      Region* first_reg = &regions_[left];
+ DCHECK(first_reg->IsFree());
+ first_reg->UnfreeLarge(time_);
+ ++num_non_free_regions_;
+ first_reg->SetTop(first_reg->Begin() + num_bytes);
+ for (size_t p = left + 1; p < right; ++p) {
+ DCHECK_LT(p, num_regions_);
+ DCHECK(regions_[p].IsFree());
+ regions_[p].UnfreeLargeTail(time_);
+ ++num_non_free_regions_;
+ }
+ *bytes_allocated = num_bytes;
+ if (usable_size != nullptr) {
+ *usable_size = num_regs * kRegionSize;
+ }
+ return reinterpret_cast<mirror::Object*>(first_reg->Begin());
+ } else {
+      // right points to the first non-free region. Restart the search after it.
+ left = right + 1;
+ }
+ }
+ return nullptr;
+}
+
+} // namespace space
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
new file mode 100644
index 0000000..2ecb79e
--- /dev/null
+++ b/runtime/gc/space/region_space.cc
@@ -0,0 +1,418 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "bump_pointer_space.h"
+#include "bump_pointer_space-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/class-inl.h"
+#include "thread_list.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+// If the live objects in a region take up less than this percentage of the
+// region size, evacuate the region.
+static constexpr uint kEvaculateLivePercentThreshold = 75U;
+
+RegionSpace* RegionSpace::Create(const std::string& name, size_t capacity,
+ uint8_t* requested_begin) {
+ capacity = RoundUp(capacity, kRegionSize);
+ std::string error_msg;
+ std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
+ PROT_READ | PROT_WRITE, true, &error_msg));
+ if (mem_map.get() == nullptr) {
+ LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
+ << PrettySize(capacity) << " with message " << error_msg;
+ MemMap::DumpMaps(LOG(ERROR));
+ return nullptr;
+ }
+ return new RegionSpace(name, mem_map.release());
+}
+
+RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
+ : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
+ kGcRetentionPolicyAlwaysCollect),
+ region_lock_("Region lock", kRegionSpaceRegionLock), time_(1U) {
+ size_t mem_map_size = mem_map->Size();
+ CHECK_ALIGNED(mem_map_size, kRegionSize);
+ CHECK_ALIGNED(mem_map->Begin(), kRegionSize);
+ num_regions_ = mem_map_size / kRegionSize;
+ num_non_free_regions_ = 0U;
+ DCHECK_GT(num_regions_, 0U);
+ regions_.reset(new Region[num_regions_]);
+ uint8_t* region_addr = mem_map->Begin();
+ for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) {
+ regions_[i] = Region(i, region_addr, region_addr + kRegionSize);
+ }
+ if (kIsDebugBuild) {
+ CHECK_EQ(regions_[0].Begin(), Begin());
+ for (size_t i = 0; i < num_regions_; ++i) {
+ CHECK(regions_[i].IsFree());
+ CHECK_EQ(static_cast<size_t>(regions_[i].End() - regions_[i].Begin()), kRegionSize);
+ if (i + 1 < num_regions_) {
+ CHECK_EQ(regions_[i].End(), regions_[i + 1].Begin());
+ }
+ }
+ CHECK_EQ(regions_[num_regions_ - 1].End(), Limit());
+ }
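+  // current_region_ points at a sentinel "full" region, so the allocation fast
+  // path never needs a null check and simply falls back to claiming a free region.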
+ full_region_ = Region();
+ DCHECK(!full_region_.IsFree());
+ DCHECK(full_region_.IsNormal());
+ current_region_ = &full_region_;
+ evac_region_ = nullptr;
+ size_t ignored;
+ DCHECK(full_region_.Alloc(kAlignment, &ignored, nullptr) == nullptr);
+}
+
+size_t RegionSpace::FromSpaceSize() {
+ uint64_t num_regions = 0;
+ MutexLock mu(Thread::Current(), region_lock_);
+ for (size_t i = 0; i < num_regions_; ++i) {
+    Region* r = &regions_[i];
+ if (r->IsInFromSpace()) {
+ ++num_regions;
+ }
+ }
+ return num_regions * kRegionSize;
+}
+
+size_t RegionSpace::UnevacFromSpaceSize() {
+ uint64_t num_regions = 0;
+ MutexLock mu(Thread::Current(), region_lock_);
+ for (size_t i = 0; i < num_regions_; ++i) {
+    Region* r = &regions_[i];
+ if (r->IsInUnevacFromSpace()) {
+ ++num_regions;
+ }
+ }
+ return num_regions * kRegionSize;
+}
+
+size_t RegionSpace::ToSpaceSize() {
+ uint64_t num_regions = 0;
+ MutexLock mu(Thread::Current(), region_lock_);
+ for (size_t i = 0; i < num_regions_; ++i) {
+    Region* r = &regions_[i];
+ if (r->IsInToSpace()) {
+ ++num_regions;
+ }
+ }
+ return num_regions * kRegionSize;
+}
+
+inline bool RegionSpace::Region::ShouldBeEvacuated() {
+ DCHECK(state_ == kRegionToSpace || state_ == kRegionLargeToSpace);
+  // Evacuate the region if it was allocated after the start of the
+  // previous GC or if the ratio of live bytes to allocated bytes is
+  // below the threshold.
+ bool result;
+ if (is_newly_allocated_) {
+ result = true;
+ } else {
+ bool is_live_percent_valid = live_bytes_ != static_cast<size_t>(-1);
+ if (is_live_percent_valid) {
+ uint live_percent = GetLivePercent();
+ if (state_ == kRegionToSpace) {
+        // Side note: due to rounding, live_percent == 0 does not
+        // necessarily mean there are no live objects (there may be
+        // a few).
+ result = live_percent < kEvaculateLivePercentThreshold;
+ } else {
+ DCHECK(state_ == kRegionLargeToSpace);
+ result = live_percent == 0U;
+ }
+ } else {
+ result = false;
+ }
+ }
+ return result;
+}
+
+// Determine which regions to evacuate and mark them as
+// from-space. Mark the rest as unevacuated from-space.
+void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table, bool force_evacuate_all) {
+ ++time_;
+ if (kUseTableLookupReadBarrier) {
+ DCHECK(rb_table->IsAllCleared());
+ rb_table->SetAll();
+ }
+ MutexLock mu(Thread::Current(), region_lock_);
+ size_t num_expected_large_tails = 0;
+ bool prev_large_evacuated = false;
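+  // Tail regions of a large allocation must follow the evacuation decision of
+  // their first (large) region, so track how many tails are still expected.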
+ for (size_t i = 0; i < num_regions_; ++i) {
+    Region* r = &regions_[i];
+ RegionState state = static_cast<RegionState>(r->state_);
+ if (!r->IsFree()) {
+ DCHECK(r->IsInToSpace());
+ if (LIKELY(num_expected_large_tails == 0U)) {
+ DCHECK(state == kRegionToSpace || state == kRegionLargeToSpace);
+ bool should_evacuate = force_evacuate_all || r->ShouldBeEvacuated();
+ if (should_evacuate) {
+ r->SetAsFromSpace();
+ DCHECK(r->IsInFromSpace());
+ } else {
+ r->SetAsUnevacFromSpace();
+ DCHECK(r->IsInUnevacFromSpace());
+ }
+ if (UNLIKELY(state == kRegionLargeToSpace)) {
+ prev_large_evacuated = should_evacuate;
+ num_expected_large_tails = RoundUp(r->BytesAllocated(), kRegionSize) / kRegionSize - 1;
+ DCHECK_GT(num_expected_large_tails, 0U);
+ }
+ } else {
+ DCHECK(state == kRegionLargeTailToSpace);
+ if (prev_large_evacuated) {
+ r->SetAsFromSpace();
+ DCHECK(r->IsInFromSpace());
+ } else {
+ r->SetAsUnevacFromSpace();
+ DCHECK(r->IsInUnevacFromSpace());
+ }
+ --num_expected_large_tails;
+ }
+ } else {
+ DCHECK_EQ(num_expected_large_tails, 0U);
+ if (kUseTableLookupReadBarrier) {
+ // Clear the rb table for to-space regions.
+ rb_table->Clear(r->Begin(), r->End());
+ }
+ }
+ }
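+  // Reset to the sentinel so that subsequent allocations claim fresh (to-space) regions.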
+ current_region_ = &full_region_;
+ evac_region_ = &full_region_;
+}
+
+void RegionSpace::ClearFromSpace() {
+ MutexLock mu(Thread::Current(), region_lock_);
+ for (size_t i = 0; i < num_regions_; ++i) {
+    Region* r = &regions_[i];
+ if (r->IsInFromSpace()) {
+ r->Clear();
+ --num_non_free_regions_;
+ } else if (r->IsInUnevacFromSpace()) {
+ r->SetUnevacFromSpaceAsToSpace();
+ }
+ }
+ evac_region_ = nullptr;
+}
+
+void RegionSpace::AssertAllRegionLiveBytesZeroOrCleared() {
+ if (kIsDebugBuild) {
+ MutexLock mu(Thread::Current(), region_lock_);
+ for (size_t i = 0; i < num_regions_; ++i) {
+      Region* r = &regions_[i];
+ size_t live_bytes = r->LiveBytes();
+ CHECK(live_bytes == 0U || live_bytes == static_cast<size_t>(-1)) << live_bytes;
+ }
+ }
+}
+
+void RegionSpace::LogFragmentationAllocFailure(std::ostream& os,
+ size_t /* failed_alloc_bytes */) {
+ size_t max_contiguous_allocation = 0;
+ MutexLock mu(Thread::Current(), region_lock_);
+ if (current_region_->End() - current_region_->Top() > 0) {
+ max_contiguous_allocation = current_region_->End() - current_region_->Top();
+ }
+ if (num_non_free_regions_ * 2 < num_regions_) {
+    // We reserve half of the regions for evacuation only. If we
+    // occupy more than half of the regions, do not report the free
+    // regions as available.
+ size_t max_contiguous_free_regions = 0;
+ size_t num_contiguous_free_regions = 0;
+ bool prev_free_region = false;
+ for (size_t i = 0; i < num_regions_; ++i) {
+      Region* r = &regions_[i];
+ if (r->IsFree()) {
+ if (!prev_free_region) {
+ CHECK_EQ(num_contiguous_free_regions, 0U);
+ prev_free_region = true;
+ }
+ ++num_contiguous_free_regions;
+ } else {
+ if (prev_free_region) {
+ CHECK_NE(num_contiguous_free_regions, 0U);
+ max_contiguous_free_regions = std::max(max_contiguous_free_regions,
+ num_contiguous_free_regions);
+ num_contiguous_free_regions = 0U;
+ prev_free_region = false;
+ }
+ }
+ }
+ max_contiguous_allocation = std::max(max_contiguous_allocation,
+ max_contiguous_free_regions * kRegionSize);
+ }
+ os << "; failed due to fragmentation (largest possible contiguous allocation "
+ << max_contiguous_allocation << " bytes)";
+ // Caller's job to print failed_alloc_bytes.
+}
+
+void RegionSpace::Clear() {
+ MutexLock mu(Thread::Current(), region_lock_);
+ for (size_t i = 0; i < num_regions_; ++i) {
+    Region* r = &regions_[i];
+ if (!r->IsFree()) {
+ --num_non_free_regions_;
+ }
+ r->Clear();
+ }
+ current_region_ = &full_region_;
+ evac_region_ = &full_region_;
+}
+
+void RegionSpace::Dump(std::ostream& os) const {
+ os << GetName() << " "
+ << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(Limit());
+}
+
+void RegionSpace::FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) {
+ DCHECK(Contains(large_obj));
+ DCHECK(IsAligned<kRegionSize>(large_obj));
+ MutexLock mu(Thread::Current(), region_lock_);
+ uint8_t* begin_addr = reinterpret_cast<uint8_t*>(large_obj);
+ uint8_t* end_addr = AlignUp(reinterpret_cast<uint8_t*>(large_obj) + bytes_allocated, kRegionSize);
+ CHECK_LT(begin_addr, end_addr);
+ for (uint8_t* addr = begin_addr; addr < end_addr; addr += kRegionSize) {
+ Region* reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(addr));
+ if (addr == begin_addr) {
+ DCHECK(reg->IsLarge());
+ } else {
+ DCHECK(reg->IsLargeTail());
+ }
+ reg->Clear();
+ --num_non_free_regions_;
+ }
+ if (end_addr < Limit()) {
+ // If we aren't at the end of the space, check that the next region is not a large tail.
+ Region* following_reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(end_addr));
+ DCHECK(!following_reg->IsLargeTail());
+ }
+}
+
+void RegionSpace::DumpRegions(std::ostream& os) {
+ MutexLock mu(Thread::Current(), region_lock_);
+ for (size_t i = 0; i < num_regions_; ++i) {
+ regions_[i].Dump(os);
+ }
+}
+
+void RegionSpace::DumpNonFreeRegions(std::ostream& os) {
+ MutexLock mu(Thread::Current(), region_lock_);
+ for (size_t i = 0; i < num_regions_; ++i) {
+    Region* reg = &regions_[i];
+ if (!reg->IsFree()) {
+ reg->Dump(os);
+ }
+ }
+}
+
+void RegionSpace::RecordAlloc(mirror::Object* ref) {
+ CHECK(ref != nullptr);
+ Region* r = RefToRegion(ref);
+ reinterpret_cast<Atomic<uint64_t>*>(&r->objects_allocated_)->FetchAndAddSequentiallyConsistent(1);
+}
+
+bool RegionSpace::AllocNewTlab(Thread* self) {
+ MutexLock mu(self, region_lock_);
+ RevokeThreadLocalBuffersLocked(self);
+ // Retain sufficient free regions for full evacuation.
+ if ((num_non_free_regions_ + 1) * 2 > num_regions_) {
+ return false;
+ }
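+  // Claim a free region and hand the entire region to the thread as its TLAB.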
+ for (size_t i = 0; i < num_regions_; ++i) {
+    Region* r = &regions_[i];
+ if (r->IsFree()) {
+ r->Unfree(time_);
+ ++num_non_free_regions_;
+ // TODO: this is buggy. Debug it.
+ // r->SetNewlyAllocated();
+ r->SetTop(r->End());
+ r->is_a_tlab_ = true;
+ r->thread_ = self;
+ self->SetTlab(r->Begin(), r->End());
+ return true;
+ }
+ }
+ return false;
+}
+
+void RegionSpace::RevokeThreadLocalBuffers(Thread* thread) {
+ MutexLock mu(Thread::Current(), region_lock_);
+ RevokeThreadLocalBuffersLocked(thread);
+}
+
+void RegionSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
+ uint8_t* tlab_start = thread->GetTlabStart();
+ DCHECK_EQ(thread->HasTlab(), tlab_start != nullptr);
+ if (tlab_start != nullptr) {
+ DCHECK(IsAligned<kRegionSize>(tlab_start));
+ Region* r = RefToRegionLocked(reinterpret_cast<mirror::Object*>(tlab_start));
+ DCHECK(r->IsNormal());
+ DCHECK_EQ(thread->GetThreadLocalBytesAllocated(), kRegionSize);
+ r->RecordThreadLocalAllocations(thread->GetThreadLocalObjectsAllocated(),
+ thread->GetThreadLocalBytesAllocated());
+ r->is_a_tlab_ = false;
+ r->thread_ = nullptr;
+ }
+ thread->SetTlab(nullptr, nullptr);
+}
+
+void RegionSpace::RevokeAllThreadLocalBuffers() {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, *Locks::runtime_shutdown_lock_);
+ MutexLock mu2(self, *Locks::thread_list_lock_);
+ std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
+ for (Thread* thread : thread_list) {
+ RevokeThreadLocalBuffers(thread);
+ }
+}
+
+void RegionSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
+ if (kIsDebugBuild) {
+ DCHECK(!thread->HasTlab());
+ }
+}
+
+void RegionSpace::AssertAllThreadLocalBuffersAreRevoked() {
+ if (kIsDebugBuild) {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, *Locks::runtime_shutdown_lock_);
+ MutexLock mu2(self, *Locks::thread_list_lock_);
+ std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
+ for (Thread* thread : thread_list) {
+ AssertThreadLocalBuffersAreRevoked(thread);
+ }
+ }
+}
+
+void RegionSpace::Region::Dump(std::ostream& os) const {
+ os << "Region[" << idx_ << "]=" << reinterpret_cast<void*>(begin_) << "-" << reinterpret_cast<void*>(top_)
+ << "-" << reinterpret_cast<void*>(end_)
+ << " state=" << static_cast<uint>(state_) << " objects_allocated=" << objects_allocated_
+ << " alloc_time=" << alloc_time_ << " live_bytes=" << live_bytes_
+ << " is_newly_allocated=" << is_newly_allocated_ << " is_a_tlab=" << is_a_tlab_ << " thread=" << thread_ << "\n";
+}
+
+} // namespace space
+} // namespace gc
+} // namespace art
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
new file mode 100644
index 0000000..b4a043f
--- /dev/null
+++ b/runtime/gc/space/region_space.h
@@ -0,0 +1,542 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_SPACE_REGION_SPACE_H_
+#define ART_RUNTIME_GC_SPACE_REGION_SPACE_H_
+
+#include "object_callbacks.h"
+#include "space.h"
+#include "gc/accounting/read_barrier_table.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+// A space that consists of equal-sized regions.
+class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
+ public:
+ typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
+
+ SpaceType GetType() const OVERRIDE {
+ return kSpaceTypeRegionSpace;
+ }
+
+  // Create a region space with the requested sizes. The requested base address is not
+  // guaranteed to be granted; if it is required, the caller should call Begin on the
+  // returned space to confirm that the request was granted.
+ static RegionSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);
+
+ // Allocate num_bytes, returns nullptr if the space is full.
+ mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+ size_t* usable_size) OVERRIDE;
+ // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
+ mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+ size_t* usable_size)
+ OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // The main allocation routine.
+ template<bool kForEvac>
+ ALWAYS_INLINE mirror::Object* AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
+ size_t* usable_size);
+ // Allocate/free large objects (objects that are larger than the region size.)
+ template<bool kForEvac>
+ mirror::Object* AllocLarge(size_t num_bytes, size_t* bytes_allocated, size_t* usable_size);
+ void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated);
+
+ // Return the storage space required by obj.
+ size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return AllocationSizeNonvirtual(obj, usable_size);
+ }
+ size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ size_t Free(Thread*, mirror::Object*) OVERRIDE {
+ UNIMPLEMENTED(FATAL);
+ return 0;
+ }
+ size_t FreeList(Thread*, size_t, mirror::Object**) OVERRIDE {
+ UNIMPLEMENTED(FATAL);
+ return 0;
+ }
+ accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
+ // No live bitmap.
+ return nullptr;
+ }
+ accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
+ // No mark bitmap.
+ return nullptr;
+ }
+
+ void Clear() OVERRIDE LOCKS_EXCLUDED(region_lock_);
+
+ void Dump(std::ostream& os) const;
+ void DumpRegions(std::ostream& os);
+ void DumpNonFreeRegions(std::ostream& os);
+
+ void RevokeThreadLocalBuffers(Thread* thread) LOCKS_EXCLUDED(region_lock_);
+ void RevokeThreadLocalBuffersLocked(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(region_lock_);
+ void RevokeAllThreadLocalBuffers() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
+ Locks::thread_list_lock_);
+ void AssertThreadLocalBuffersAreRevoked(Thread* thread) LOCKS_EXCLUDED(region_lock_);
+ void AssertAllThreadLocalBuffersAreRevoked() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
+ Locks::thread_list_lock_);
+
+ enum SubSpaceType {
+ kAllSpaces, // All spaces.
+ kFromSpace, // From-space. To be evacuated.
+ kUnevacFromSpace, // Unevacuated from-space. Not to be evacuated.
+ kToSpace, // To-space.
+ };
+
+ template<SubSpaceType kSubSpaceType> uint64_t GetBytesAllocatedInternal();
+ template<SubSpaceType kSubSpaceType> uint64_t GetObjectsAllocatedInternal();
+ uint64_t GetBytesAllocated() {
+ return GetBytesAllocatedInternal<kAllSpaces>();
+ }
+ uint64_t GetObjectsAllocated() {
+ return GetObjectsAllocatedInternal<kAllSpaces>();
+ }
+ uint64_t GetBytesAllocatedInFromSpace() {
+ return GetBytesAllocatedInternal<kFromSpace>();
+ }
+ uint64_t GetObjectsAllocatedInFromSpace() {
+ return GetObjectsAllocatedInternal<kFromSpace>();
+ }
+ uint64_t GetBytesAllocatedInUnevacFromSpace() {
+ return GetBytesAllocatedInternal<kUnevacFromSpace>();
+ }
+ uint64_t GetObjectsAllocatedInUnevacFromSpace() {
+ return GetObjectsAllocatedInternal<kUnevacFromSpace>();
+ }
+
+ bool CanMoveObjects() const OVERRIDE {
+ return true;
+ }
+
+ bool Contains(const mirror::Object* obj) const {
+ const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
+ return byte_obj >= Begin() && byte_obj < Limit();
+ }
+
+ RegionSpace* AsRegionSpace() OVERRIDE {
+ return this;
+ }
+
+ // Go through all of the blocks and visit the continuous objects.
+ void Walk(ObjectCallback* callback, void* arg)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ WalkInternal<false>(callback, arg);
+ }
+
+ void WalkToSpace(ObjectCallback* callback, void* arg)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ WalkInternal<true>(callback, arg);
+ }
+
+ accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE {
+ return nullptr;
+ }
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Object alignment within the space.
+ static constexpr size_t kAlignment = kObjectAlignment;
+ // The region size.
+ static constexpr size_t kRegionSize = 1 * MB;
+
+ bool IsInFromSpace(mirror::Object* ref) {
+ if (HasAddress(ref)) {
+ Region* r = RefToRegionUnlocked(ref);
+ return r->IsInFromSpace();
+ }
+ return false;
+ }
+
+ bool IsInUnevacFromSpace(mirror::Object* ref) {
+ if (HasAddress(ref)) {
+ Region* r = RefToRegionUnlocked(ref);
+ return r->IsInUnevacFromSpace();
+ }
+ return false;
+ }
+
+ bool IsInToSpace(mirror::Object* ref) {
+ if (HasAddress(ref)) {
+ Region* r = RefToRegionUnlocked(ref);
+ return r->IsInToSpace();
+ }
+ return false;
+ }
+
+ void SetFromSpace(accounting::ReadBarrierTable* rb_table, bool force_evacuate_all)
+ LOCKS_EXCLUDED(region_lock_);
+
+ size_t FromSpaceSize();
+ size_t UnevacFromSpaceSize();
+ size_t ToSpaceSize();
+ void ClearFromSpace();
+
+ void AddLiveBytes(mirror::Object* ref, size_t alloc_size) {
+ Region* reg = RefToRegion(ref);
+ reg->AddLiveBytes(alloc_size);
+ }
+
+ void AssertAllRegionLiveBytesZeroOrCleared();
+
+ void RecordAlloc(mirror::Object* ref);
+ bool AllocNewTlab(Thread* self);
+
+ uint32_t Time() {
+ return time_;
+ }
+
+ private:
+ RegionSpace(const std::string& name, MemMap* mem_map);
+
+ template<bool kToSpaceOnly>
+ void WalkInternal(ObjectCallback* callback, void* arg) NO_THREAD_SAFETY_ANALYSIS;
+
+ enum RegionState {
+ kRegionFree, // Free region.
+ kRegionToSpace, // To-space region.
+ kRegionFromSpace, // From-space region. To be evacuated.
+ kRegionUnevacFromSpace, // Unevacuated from-space region. Not to be evacuated.
+ kRegionLargeToSpace, // Large (allocation larger than the region size) to-space.
+ kRegionLargeFromSpace, // Large from-space. To be evacuated.
+ kRegionLargeUnevacFromSpace, // Large unevacuated from-space.
+ kRegionLargeTailToSpace, // Large tail (non-first regions of a large allocation).
+ kRegionLargeTailFromSpace, // Large tail from-space.
+ kRegionLargeTailUnevacFromSpace, // Large tail unevacuated from-space.
+ };
+
+ class Region {
+ public:
+ Region()
+ : idx_(static_cast<size_t>(-1)),
+ begin_(nullptr), top_(nullptr), end_(nullptr), state_(kRegionToSpace),
+ objects_allocated_(0), alloc_time_(0), live_bytes_(static_cast<size_t>(-1)),
+ is_newly_allocated_(false), is_a_tlab_(false), thread_(nullptr) {}
+
+ Region(size_t idx, uint8_t* begin, uint8_t* end)
+ : idx_(idx), begin_(begin), top_(begin), end_(end), state_(kRegionFree),
+ objects_allocated_(0), alloc_time_(0), live_bytes_(static_cast<size_t>(-1)),
+ is_newly_allocated_(false), is_a_tlab_(false), thread_(nullptr) {
+ DCHECK_LT(begin, end);
+ DCHECK_EQ(static_cast<size_t>(end - begin), kRegionSize);
+ }
+
+ void Clear() {
+ top_ = begin_;
+ state_ = kRegionFree;
+ objects_allocated_ = 0;
+ alloc_time_ = 0;
+ live_bytes_ = static_cast<size_t>(-1);
+ if (!kMadviseZeroes) {
+ memset(begin_, 0, end_ - begin_);
+ }
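+      // Release the pages back to the kernel; they read back as zero when next touched.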
+ madvise(begin_, end_ - begin_, MADV_DONTNEED);
+ is_newly_allocated_ = false;
+ is_a_tlab_ = false;
+ thread_ = nullptr;
+ }
+
+ ALWAYS_INLINE mirror::Object* Alloc(size_t num_bytes, size_t* bytes_allocated,
+ size_t* usable_size);
+
+ bool IsFree() const {
+ bool is_free = state_ == kRegionFree;
+ if (is_free) {
+ DCHECK_EQ(begin_, top_);
+ DCHECK_EQ(objects_allocated_, 0U);
+ }
+ return is_free;
+ }
+
+ // Given a free region, declare it non-free (allocated).
+ void Unfree(uint32_t alloc_time) {
+ DCHECK(IsFree());
+ state_ = kRegionToSpace;
+ alloc_time_ = alloc_time;
+ }
+
+ void UnfreeLarge(uint32_t alloc_time) {
+ DCHECK(IsFree());
+ state_ = kRegionLargeToSpace;
+ alloc_time_ = alloc_time;
+ }
+
+ void UnfreeLargeTail(uint32_t alloc_time) {
+ DCHECK(IsFree());
+ state_ = kRegionLargeTailToSpace;
+ alloc_time_ = alloc_time;
+ }
+
+ void SetNewlyAllocated() {
+ is_newly_allocated_ = true;
+ }
+
+ // Non-large, non-large-tail.
+ bool IsNormal() const {
+ return state_ == kRegionToSpace || state_ == kRegionFromSpace ||
+ state_ == kRegionUnevacFromSpace;
+ }
+
+ bool IsLarge() const {
+ bool is_large = state_ == kRegionLargeToSpace || state_ == kRegionLargeFromSpace ||
+ state_ == kRegionLargeUnevacFromSpace;
+ if (is_large) {
+        DCHECK_LT(begin_ + kRegionSize, top_);
+ }
+ return is_large;
+ }
+
+ bool IsLargeTail() const {
+ bool is_large_tail = state_ == kRegionLargeTailToSpace ||
+ state_ == kRegionLargeTailFromSpace ||
+ state_ == kRegionLargeTailUnevacFromSpace;
+ if (is_large_tail) {
+ DCHECK_EQ(begin_, top_);
+ }
+ return is_large_tail;
+ }
+
+ size_t Idx() const {
+ return idx_;
+ }
+
+ bool IsInFromSpace() const {
+ return state_ == kRegionFromSpace || state_ == kRegionLargeFromSpace ||
+ state_ == kRegionLargeTailFromSpace;
+ }
+
+ bool IsInToSpace() const {
+ return state_ == kRegionToSpace || state_ == kRegionLargeToSpace ||
+ state_ == kRegionLargeTailToSpace;
+ }
+
+ bool IsInUnevacFromSpace() const {
+ return state_ == kRegionUnevacFromSpace || state_ == kRegionLargeUnevacFromSpace ||
+ state_ == kRegionLargeTailUnevacFromSpace;
+ }
+
+ void SetAsFromSpace() {
+ switch (state_) {
+ case kRegionToSpace:
+ state_ = kRegionFromSpace;
+ break;
+ case kRegionLargeToSpace:
+ state_ = kRegionLargeFromSpace;
+ break;
+ case kRegionLargeTailToSpace:
+ state_ = kRegionLargeTailFromSpace;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected region state : " << static_cast<uint>(state_)
+ << " idx=" << idx_;
+ }
+ live_bytes_ = static_cast<size_t>(-1);
+ }
+
+ void SetAsUnevacFromSpace() {
+ switch (state_) {
+ case kRegionToSpace:
+ state_ = kRegionUnevacFromSpace;
+ break;
+ case kRegionLargeToSpace:
+ state_ = kRegionLargeUnevacFromSpace;
+ break;
+ case kRegionLargeTailToSpace:
+ state_ = kRegionLargeTailUnevacFromSpace;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected region state : " << static_cast<uint>(state_)
+ << " idx=" << idx_;
+ }
+ live_bytes_ = 0U;
+ }
+
+ void SetUnevacFromSpaceAsToSpace() {
+ switch (state_) {
+ case kRegionUnevacFromSpace:
+ state_ = kRegionToSpace;
+ break;
+ case kRegionLargeUnevacFromSpace:
+ state_ = kRegionLargeToSpace;
+ break;
+ case kRegionLargeTailUnevacFromSpace:
+ state_ = kRegionLargeTailToSpace;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected region state : " << static_cast<uint>(state_)
+ << " idx=" << idx_;
+ }
+ }
+
+ ALWAYS_INLINE bool ShouldBeEvacuated();
+
+ void AddLiveBytes(size_t live_bytes) {
+ DCHECK(IsInUnevacFromSpace());
+ DCHECK(!IsLargeTail());
+ DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
+ live_bytes_ += live_bytes;
+ DCHECK_LE(live_bytes_, BytesAllocated());
+ }
+
+ size_t LiveBytes() const {
+ return live_bytes_;
+ }
+
+ uint GetLivePercent() const {
+ DCHECK(IsInToSpace());
+ DCHECK(!IsLargeTail());
+ DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
+ DCHECK_LE(live_bytes_, BytesAllocated());
+ size_t bytes_allocated = RoundUp(BytesAllocated(), kRegionSize);
+ DCHECK_GE(bytes_allocated, 0U);
+ uint result = (live_bytes_ * 100U) / bytes_allocated;
+ DCHECK_LE(result, 100U);
+ return result;
+ }
+
+ size_t BytesAllocated() const {
+ if (IsLarge()) {
+ DCHECK_LT(begin_ + kRegionSize, top_);
+ return static_cast<size_t>(top_ - begin_);
+ } else if (IsLargeTail()) {
+ DCHECK_EQ(begin_, top_);
+ return 0;
+ } else {
+ DCHECK(IsNormal()) << static_cast<uint>(state_);
+ DCHECK_LE(begin_, top_);
+ size_t bytes = static_cast<size_t>(top_ - begin_);
+ DCHECK_LE(bytes, kRegionSize);
+ return bytes;
+ }
+ }
+
+ size_t ObjectsAllocated() const {
+ if (IsLarge()) {
+        DCHECK_LT(begin_ + kRegionSize, top_);
+ DCHECK_EQ(objects_allocated_, 0U);
+ return 1;
+ } else if (IsLargeTail()) {
+ DCHECK_EQ(begin_, top_);
+ DCHECK_EQ(objects_allocated_, 0U);
+ return 0;
+ } else {
+ DCHECK(IsNormal()) << static_cast<uint>(state_);
+ return objects_allocated_;
+ }
+ }
+
+ uint8_t* Begin() const {
+ return begin_;
+ }
+
+ uint8_t* Top() const {
+ return top_;
+ }
+
+ void SetTop(uint8_t* new_top) {
+ top_ = new_top;
+ }
+
+ uint8_t* End() const {
+ return end_;
+ }
+
+ bool Contains(mirror::Object* ref) const {
+ return begin_ <= reinterpret_cast<uint8_t*>(ref) && reinterpret_cast<uint8_t*>(ref) < end_;
+ }
+
+ void Dump(std::ostream& os) const;
+
+ void RecordThreadLocalAllocations(size_t num_objects, size_t num_bytes) {
+ DCHECK(IsNormal());
+ DCHECK_EQ(objects_allocated_, 0U);
+ DCHECK_EQ(top_, end_);
+ objects_allocated_ = num_objects;
+ top_ = begin_ + num_bytes;
+ DCHECK_EQ(top_, end_);
+ }
+
+ private:
+ size_t idx_; // The region's index in the region space.
+ uint8_t* begin_; // The begin address of the region.
+ // Can't use Atomic<uint8_t*> as Atomic's copy operator is implicitly deleted.
+ uint8_t* top_; // The current position of the allocation.
+ uint8_t* end_; // The end address of the region.
+ uint8_t state_; // The region state (see RegionState).
+ uint64_t objects_allocated_; // The number of objects allocated.
+ uint32_t alloc_time_; // The allocation time of the region.
+ size_t live_bytes_; // The live bytes. Used to compute the live percent.
+ bool is_newly_allocated_; // True if it's allocated after the last collection.
+ bool is_a_tlab_; // True if it's a tlab.
+ Thread* thread_; // The owning thread if it's a tlab.
+
+ friend class RegionSpace;
+ };
+
+ Region* RefToRegion(mirror::Object* ref) LOCKS_EXCLUDED(region_lock_) {
+ MutexLock mu(Thread::Current(), region_lock_);
+ return RefToRegionLocked(ref);
+ }
+
+ Region* RefToRegionUnlocked(mirror::Object* ref) NO_THREAD_SAFETY_ANALYSIS {
+    // For performance reasons (this is frequently called via
+    // IsInFromSpace() etc.) we avoid taking a lock here. Note that
+    // since a region only changes from to-space to from-space during
+    // a pause (SetFromSpace()) and from from-space to free after the
+    // GC is done, it is safe to access the region state without the
+    // lock as long as ref is a valid reference into an allocated
+    // region.
+ return RefToRegionLocked(ref);
+ }
+
+ Region* RefToRegionLocked(mirror::Object* ref) EXCLUSIVE_LOCKS_REQUIRED(region_lock_) {
+ DCHECK(HasAddress(ref));
+ uintptr_t offset = reinterpret_cast<uintptr_t>(ref) - reinterpret_cast<uintptr_t>(Begin());
+ size_t reg_idx = offset / kRegionSize;
+ DCHECK_LT(reg_idx, num_regions_);
+    Region* reg = &regions_[reg_idx];
+ DCHECK_EQ(reg->Idx(), reg_idx);
+ DCHECK(reg->Contains(ref));
+ return reg;
+ }
+
+ mirror::Object* GetNextObject(mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+
+ uint32_t time_; // The time as the number of collections since the startup.
+ size_t num_regions_; // The number of regions in this space.
+ size_t num_non_free_regions_; // The number of non-free regions in this space.
+ std::unique_ptr<Region[]> regions_ GUARDED_BY(region_lock_);
+ // The pointer to the region array.
+ Region* current_region_; // The region that's being allocated currently.
+ Region* evac_region_; // The region that's being evacuated to currently.
+ Region full_region_; // The dummy/sentinel region that looks full.
+
+ DISALLOW_COPY_AND_ASSIGN(RegionSpace);
+};
+
+} // namespace space
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_SPACE_REGION_SPACE_H_
diff --git a/runtime/gc/space/space.cc b/runtime/gc/space/space.cc
index 486d79a..a2e2c1c 100644
--- a/runtime/gc/space/space.cc
+++ b/runtime/gc/space/space.cc
@@ -58,6 +58,11 @@
UNREACHABLE();
}
+RegionSpace* Space::AsRegionSpace() {
+ LOG(FATAL) << "Unreachable";
+ return nullptr;
+}
+
AllocSpace* Space::AsAllocSpace() {
UNIMPLEMENTED(FATAL) << "Unreachable";
UNREACHABLE();
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 860a4c9..d24650b 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -50,6 +50,7 @@
class RosAllocSpace;
class ImageSpace;
class LargeObjectSpace;
+class RegionSpace;
class ZygoteSpace;
static constexpr bool kDebugSpaces = kIsDebugBuild;
@@ -72,6 +73,7 @@
kSpaceTypeZygoteSpace,
kSpaceTypeBumpPointerSpace,
kSpaceTypeLargeObjectSpace,
+ kSpaceTypeRegionSpace,
};
std::ostream& operator<<(std::ostream& os, const SpaceType& space_type);
@@ -132,6 +134,11 @@
}
virtual BumpPointerSpace* AsBumpPointerSpace();
+ bool IsRegionSpace() const {
+ return GetType() == kSpaceTypeRegionSpace;
+ }
+ virtual RegionSpace* AsRegionSpace();
+
// Does this space hold large objects and implement the large object space abstraction?
bool IsLargeObjectSpace() const {
return GetType() == kSpaceTypeLargeObjectSpace;