More of the concurrent copying collector.

This adds HeapBitmap::GetLargeObjectBitmap() (with a matching friend
declaration for collector::ConcurrentCopying) and introduces
ReadBarrierTable, which keeps one byte per 1MB region of the low 4GB
heap; a set entry tells the kUseTableLookupReadBarrier read barrier to
take its slow path.
Bug: 12687968
Change-Id: I62f70274d47df6d6cab714df95c518b750ce3105
diff --git a/runtime/gc/accounting/heap_bitmap-inl.h b/runtime/gc/accounting/heap_bitmap-inl.h
index 34c15c7..8fcc87d 100644
--- a/runtime/gc/accounting/heap_bitmap-inl.h
+++ b/runtime/gc/accounting/heap_bitmap-inl.h
@@ -105,6 +105,15 @@
   return nullptr;
 }
 
+inline LargeObjectBitmap* HeapBitmap::GetLargeObjectBitmap(const mirror::Object* obj) const {
+  for (const auto& bitmap : large_object_bitmaps_) {
+    if (LIKELY(bitmap->HasAddress(obj))) {
+      return bitmap;
+    }
+  }
+  return nullptr;
+}
+
 }  // namespace accounting
 }  // namespace gc
 }  // namespace art
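
For reviewers, here is how the new lookup pairs with the existing GetContinuousSpaceBitmap() on a marking path. This is a hypothetical sketch for illustration only, not part of the change: MarkGraySketch is an invented name, while HeapBitmap, ContinuousSpaceBitmap::Set, and LargeObjectBitmap::Set are existing ART APIs.

// Hypothetical illustration: mark an object that may live either in a
// continuous space or in the large object space.
void MarkGraySketch(const mirror::Object* obj, accounting::HeapBitmap* heap_bitmap) {
  accounting::ContinuousSpaceBitmap* bitmap = heap_bitmap->GetContinuousSpaceBitmap(obj);
  if (bitmap != nullptr) {
    bitmap->Set(obj);  // Object lives in a continuous space.
    return;
  }
  // Fall back to the per-space large object bitmaps searched by the new lookup.
  accounting::LargeObjectBitmap* los_bitmap = heap_bitmap->GetLargeObjectBitmap(obj);
  CHECK(los_bitmap != nullptr) << "Object not covered by any space bitmap";
  los_bitmap->Set(obj);  // Object is a large object.
}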
diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h
index ca6dc46..245e074 100644
--- a/runtime/gc/accounting/heap_bitmap.h
+++ b/runtime/gc/accounting/heap_bitmap.h
@@ -27,6 +27,10 @@
 
 class Heap;
 
+namespace collector {
+  class ConcurrentCopying;
+}  // namespace collector
+
 namespace accounting {
 
 class HeapBitmap {
@@ -40,6 +44,7 @@
   bool AtomicTestAndSet(const mirror::Object* obj, const LargeObjectSetVisitor& visitor)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) ALWAYS_INLINE;
   ContinuousSpaceBitmap* GetContinuousSpaceBitmap(const mirror::Object* obj) const;
+  LargeObjectBitmap* GetLargeObjectBitmap(const mirror::Object* obj) const;
 
   void Walk(ObjectCallback* callback, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -78,6 +83,7 @@
       large_object_bitmaps_;
 
   friend class art::gc::Heap;
+  friend class art::gc::collector::ConcurrentCopying;
 };
 
 }  // namespace accounting
diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h
new file mode 100644
index 0000000..84d5da3
--- /dev/null
+++ b/runtime/gc/accounting/read_barrier_table.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_ACCOUNTING_READ_BARRIER_TABLE_H_
+#define ART_RUNTIME_GC_ACCOUNTING_READ_BARRIER_TABLE_H_
+
+#include "base/mutex.h"
+#include "gc/space/space.h"
+#include "globals.h"
+#include "mem_map.h"
+
+namespace art {
+namespace gc {
+namespace accounting {
+
+// Used to decide whether to take the read barrier fast/slow paths for
+// kUseTableLookupReadBarrier. If an entry is set, take the read
+// barrier slow path. There's an entry per region.
+class ReadBarrierTable {
+ public:
+  ReadBarrierTable() {
+    size_t capacity = static_cast<size_t>(kHeapCapacity / kRegionSize);
+    DCHECK_EQ(kHeapCapacity / kRegionSize,
+              static_cast<uint64_t>(static_cast<size_t>(kHeapCapacity / kRegionSize)));
+    std::string error_msg;
+    MemMap* mem_map = MemMap::MapAnonymous("read barrier table", nullptr, capacity,
+                                           PROT_READ | PROT_WRITE, false, &error_msg);
+    CHECK(mem_map != nullptr && mem_map->Begin() != nullptr)
+        << "couldn't allocate read barrier table: " << error_msg;
+    mem_map_.reset(mem_map);
+  }
+  void ClearForSpace(space::ContinuousSpace* space) {
+    uint8_t* entry_start = EntryFromAddr(space->Begin());
+    uint8_t* entry_end = EntryFromAddr(space->Limit());
+    memset(reinterpret_cast<void*>(entry_start), 0, entry_end - entry_start);
+  }
+  void Clear(uint8_t* start_addr, uint8_t* end_addr) {
+    DCHECK(IsValidHeapAddr(start_addr)) << reinterpret_cast<const void*>(start_addr);
+    DCHECK(IsValidHeapAddr(end_addr)) << reinterpret_cast<const void*>(end_addr);
+    DCHECK(IsAligned<kRegionSize>(start_addr));
+    DCHECK(IsAligned<kRegionSize>(end_addr));
+    uint8_t* entry_start = EntryFromAddr(start_addr);
+    uint8_t* entry_end = EntryFromAddr(end_addr);
+    memset(reinterpret_cast<void*>(entry_start), 0, entry_end - entry_start);
+  }
+  bool IsSet(const void* heap_addr) const {
+    DCHECK(IsValidHeapAddr(heap_addr)) << heap_addr;
+    uint8_t entry_value = *EntryFromAddr(heap_addr);
+    DCHECK(entry_value == 0 || entry_value == kSetEntryValue);
+    return entry_value == kSetEntryValue;
+  }
+  void ClearAll() {
+    mem_map_->MadviseDontNeedAndZero();
+  }
+  void SetAll() {
+    memset(mem_map_->Begin(), kSetEntryValue, mem_map_->Size());
+  }
+  bool IsAllCleared() const {
+    for (uint32_t* p = reinterpret_cast<uint32_t*>(mem_map_->Begin());
+         p < reinterpret_cast<uint32_t*>(mem_map_->End()); ++p) {
+      if (*p != 0) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  // This should match RegionSpace::kRegionSize. static_assert'ed in concurrent_copying.h.
+  static constexpr size_t kRegionSize = 1 * MB;
+
+ private:
+  static constexpr uint64_t kHeapCapacity = 4ULL * GB;  // Low 4GB.
+  static constexpr uint8_t kSetEntryValue = 0x01;
+
+  uint8_t* EntryFromAddr(const void* heap_addr) const {
+    DCHECK(IsValidHeapAddr(heap_addr)) << heap_addr;
+    uint8_t* entry_addr = mem_map_->Begin() + reinterpret_cast<uintptr_t>(heap_addr) / kRegionSize;
+    DCHECK(IsValidEntry(entry_addr)) << "heap_addr: " << heap_addr
+                                     << " entry_addr: " << reinterpret_cast<void*>(entry_addr);
+    return entry_addr;
+  }
+
+  bool IsValidHeapAddr(const void* heap_addr) const {
+#ifdef __LP64__
+    return reinterpret_cast<uint64_t>(heap_addr) < kHeapCapacity;
+#else
+    UNUSED(heap_addr);
+    return true;
+#endif
+  }
+
+  bool IsValidEntry(const uint8_t* entry_addr) const {
+    uint8_t* begin = mem_map_->Begin();
+    uint8_t* end = mem_map_->End();
+    return entry_addr >= begin && entry_addr < end;
+  }
+
+  std::unique_ptr<MemMap> mem_map_;
+};
+
+}  // namespace accounting
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_RUNTIME_GC_ACCOUNTING_READ_BARRIER_TABLE_H_
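
A note on sizing: with kHeapCapacity = 4GB and kRegionSize = 1MB, the table holds only 4096 one-byte entries, so SetAll()/ClearAll() touch a single page. To illustrate the fast/slow path split described in the class comment, here is a hypothetical sketch, not part of this change: ReadBarrierSketch and SlowPathMarkSketch are invented placeholders for whatever the collector's barrier and slow path end up being, while ReadBarrierTable::IsSet is the API added above.

// Hypothetical read barrier fast path: only references into regions whose
// table entry is set need the slow path.
mirror::Object* ReadBarrierSketch(mirror::Object* ref,
                                  accounting::ReadBarrierTable* rb_table) {
  if (ref != nullptr && rb_table->IsSet(ref)) {
    ref = SlowPathMarkSketch(ref);  // Invented name: mark/forward the reference.
  }
  return ref;
}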