GC cleanup.
Greater use of directories and namespaces.
Fix bugs that cause verify options to fail.
Address numerous other issues:
GC barrier waits occurring while holding locks:
GC barrier waits happen when we wait for threads to run the checkpoint function
on themselves. This was being done with the heap bitmap and mutator locks held,
so any thread that tries to take either lock exclusively blocks behind the
waiting thread. If that blocked thread is one of the threads we are waiting on
to run its checkpoint, the VM deadlocks.
The deadlock went unnoticed because the call that checks for wait safety was
removed in: https://googleplex-android-review.googlesource.com/#/c/249423/1.
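A minimal sketch of the hazard (the lock names are real ART locks, but the
checkpoint barrier call is only illustrative, not the actual checkpoint code):

  // GC thread: requests checkpoints and waits with both locks held shared.
  ReaderMutexLock bitmap_mu(self, *Locks::heap_bitmap_lock_);
  ReaderMutexLock mutator_mu(self, *Locks::mutator_lock_);
  checkpoint_barrier.Wait(self);  // illustrative: block until every thread has run its checkpoint

  // Mutator thread: needs one of those locks exclusively before it can reach
  // its checkpoint, so it blocks behind the GC thread above...
  WriterMutexLock exclusive_mu(self, *Locks::mutator_lock_);  // never acquired

  // ...and the GC thread never sees this thread check in: deadlock.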
NewTimingLogger:
The existing timing logger records when a split ends but not when it begins.
This isn't good for systrace: in the context of GC it means that during races
between mutators and the GC it is hard to tell which phase the GC is in; we
only know which phase just finished and have to derive the current one, which
isn't ideal.
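As a rough sketch of the intended usage (the split method names are
illustrative; the constructor arguments mirror how the collectors construct
their logger in this change):

  base::NewTimingLogger timings("MarkSweep", /* precise */ true, /* verbose */ false);
  timings.StartSplit("MarkRoots");  // the begin edge is now recorded, visible to systrace
  MarkRoots();                      // hypothetical phase body
  timings.EndSplit();               // the end edge, as before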
Support for only one discontinuous space:
The code special-cases the continuous and large object spaces rather than
assuming we can have a collection of both.
Sorted atomic stacks:
Sorting is used to improve verification performance, since a sorted stack can
be probed with a binary search. Simplify their use and add extra checks.
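A sketch of how verification is expected to use this (the heap accessor is an
assumed name; Sort and Contains are the new AtomicStack methods):

  accounting::ObjectStack* live_stack = heap->GetLiveStack();  // assumed accessor
  live_stack->Sort();                // one O(n log n) pass while stopped
  if (!live_stack->Contains(obj)) {  // probes are now binary searches
    LOG(FATAL) << "Object " << obj << " missing from the live stack";
  }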
Simplify mod-union table abstractions.
Reduce use of std::strings and their associated overhead in hot code.
Make time units of fields explicit.
Reduce confusion that IsAllocSpace is really IsDlMallocSpace.
Make GetTotalMemory (exposed via System) equal to the footprint (as in Dalvik)
rather than the max memory footprint.
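Illustration of the new semantics (accessor names assumed):

  size_t total = heap->GetTotalMemory();  // current footprint, matching Dalvik
  size_t max = heap->GetMaxMemory();      // the -Xmx cap stays a separate query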
Change-Id: Ie87067140fa4499b15edab691fe6565d79599812
diff --git a/src/gc/atomic_stack.h b/src/gc/accounting/atomic_stack.h
similarity index 80%
rename from src/gc/atomic_stack.h
rename to src/gc/accounting/atomic_stack.h
index 0197bce..4e1c253 100644
--- a/src/gc/atomic_stack.h
+++ b/src/gc/accounting/atomic_stack.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_SRC_ATOMIC_STACK_H_
-#define ART_SRC_ATOMIC_STACK_H_
+#ifndef ART_SRC_GC_ACCOUNTING_ATOMIC_STACK_H_
+#define ART_SRC_GC_ACCOUNTING_ATOMIC_STACK_H_
#include <string>
@@ -27,6 +27,8 @@
#include "utils.h"
namespace art {
+namespace gc {
+namespace accounting {
template <typename T>
class AtomicStack {
@@ -38,15 +40,14 @@
return mark_stack.release();
}
- ~AtomicStack(){
-
- }
+ ~AtomicStack() {}
void Reset() {
DCHECK(mem_map_.get() != NULL);
DCHECK(begin_ != NULL);
front_index_ = 0;
back_index_ = 0;
+ is_sorted_ = true;
int result = madvise(begin_, sizeof(T) * capacity_, MADV_DONTNEED);
if (result == -1) {
PLOG(WARNING) << "madvise failed";
@@ -58,6 +59,7 @@
// Returns false if we overflowed the stack.
bool AtomicPushBack(const T& value) {
int32_t index;
+ is_sorted_ = false;
do {
index = back_index_;
if (UNLIKELY(static_cast<size_t>(index) >= capacity_)) {
@@ -70,6 +72,7 @@
}
void PushBack(const T& value) {
+ is_sorted_ = false;
int32_t index = back_index_;
DCHECK_LT(static_cast<size_t>(index), capacity_);
back_index_ = index + 1;
@@ -100,11 +103,11 @@
return back_index_ - front_index_;
}
- T* Begin() {
+ T* Begin() const {
return const_cast<mirror::Object**>(begin_ + front_index_);
}
- T* End() {
+ T* End() const {
return const_cast<mirror::Object**>(begin_ + back_index_);
}
@@ -118,14 +121,33 @@
Init();
}
+ void Sort() {
+ if (!is_sorted_) {
+ int32_t start_back_index = back_index_.get();
+ int32_t start_front_index = front_index_.get();
+ is_sorted_ = true;
+ std::sort(Begin(), End());
+ CHECK_EQ(start_back_index, back_index_.get());
+ CHECK_EQ(start_front_index, front_index_.get());
+ }
+ }
+
+ bool Contains(const T& value) const {
+ if (is_sorted_) {
+ return std::binary_search(Begin(), End(), value);
+ } else {
+ return std::find(Begin(), End(), value) != End();
+ }
+ }
+
private:
AtomicStack(const std::string& name, const size_t capacity)
: name_(name),
back_index_(0),
front_index_(0),
begin_(NULL),
- capacity_(capacity) {
-
+ capacity_(capacity),
+ is_sorted_(true) {
}
// Size in number of elements.
@@ -156,11 +178,15 @@
// Maximum number of elements.
size_t capacity_;
+ bool is_sorted_;
+
DISALLOW_COPY_AND_ASSIGN(AtomicStack);
};
typedef AtomicStack<mirror::Object*> ObjectStack;
+} // namespace accounting
+} // namespace gc
} // namespace art
-#endif // ART_SRC_MARK_STACK_H_
+#endif // ART_SRC_GC_ACCOUNTING_ATOMIC_STACK_H_
diff --git a/src/gc/card_table-inl.h b/src/gc/accounting/card_table-inl.h
similarity index 98%
rename from src/gc/card_table-inl.h
rename to src/gc/accounting/card_table-inl.h
index 13590b7..1e75290 100644
--- a/src/gc/card_table-inl.h
+++ b/src/gc/accounting/card_table-inl.h
@@ -24,6 +24,8 @@
#include "utils.h"
namespace art {
+namespace gc {
+namespace accounting {
static inline bool byte_cas(byte old_value, byte new_value, byte* address) {
// Little endian means most significant byte is on the left.
@@ -204,6 +206,8 @@
<< " end: " << reinterpret_cast<void*>(mem_map_->End());
}
+} // namespace accounting
+} // namespace gc
} // namespace art
#endif // ART_SRC_GC_CARDTABLE_INL_H_
diff --git a/src/gc/card_table.cc b/src/gc/accounting/card_table.cc
similarity index 95%
rename from src/gc/card_table.cc
rename to src/gc/accounting/card_table.cc
index 57824e9..4f2ae26 100644
--- a/src/gc/card_table.cc
+++ b/src/gc/accounting/card_table.cc
@@ -17,14 +17,17 @@
#include "card_table.h"
#include "base/logging.h"
-#include "gc/card_table-inl.h"
-#include "heap.h"
+#include "card_table-inl.h"
+#include "gc/heap.h"
+#include "gc/space/space.h"
#include "heap_bitmap.h"
#include "runtime.h"
-#include "space.h"
#include "utils.h"
namespace art {
+namespace gc {
+namespace accounting {
+
/*
* Maintain a card table from the write barrier. All writes of
* non-NULL values to heap addresses should go through an entry in
@@ -82,7 +85,7 @@
byte* __attribute__((unused)) end = mem_map_->End();
}
-void CardTable::ClearSpaceCards(ContinuousSpace* space) {
+void CardTable::ClearSpaceCards(space::ContinuousSpace* space) {
// TODO: clear just the range of the table that has been modified
byte* card_start = CardFromAddr(space->Begin());
byte* card_end = CardFromAddr(space->End()); // Make sure to round up.
@@ -116,4 +119,6 @@
UNIMPLEMENTED(WARNING) << "Card table verification";
}
+} // namespace accounting
+} // namespace gc
} // namespace art
diff --git a/src/gc/card_table.h b/src/gc/accounting/card_table.h
similarity index 95%
rename from src/gc/card_table.h
rename to src/gc/accounting/card_table.h
index 842fcc3..cf85d15 100644
--- a/src/gc/card_table.h
+++ b/src/gc/accounting/card_table.h
@@ -23,11 +23,21 @@
#include "UniquePtr.h"
namespace art {
+
namespace mirror {
-class Object;
+ class Object;
} // namespace mirror
+
+namespace gc {
+
+namespace space {
+ class ContinuousSpace;
+} // namespace space
+
class Heap;
-class ContinuousSpace;
+
+namespace accounting {
+
class SpaceBitmap;
// Maintain a card table from the the write barrier. All writes of
@@ -105,7 +115,7 @@
void ClearCardTable();
// Resets all of the bytes in the card table which do not map to the image space.
- void ClearSpaceCards(ContinuousSpace* space);
+ void ClearSpaceCards(space::ContinuousSpace* space);
// Returns the first address in the heap which maps to this card.
void* AddrFromCard(const byte *card_addr) const;
@@ -139,5 +149,8 @@
const size_t offset_;
};
+} // namespace accounting
+} // namespace gc
} // namespace art
+
#endif // ART_SRC_GC_CARDTABLE_H_
diff --git a/src/gc/accounting/heap_bitmap-inl.h b/src/gc/accounting/heap_bitmap-inl.h
new file mode 100644
index 0000000..8e3123b
--- /dev/null
+++ b/src/gc/accounting/heap_bitmap-inl.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_ACCOUNTING_HEAP_BITMAP_INL_H_
+#define ART_SRC_GC_ACCOUNTING_HEAP_BITMAP_INL_H_
+
+#include "heap_bitmap.h"
+
+namespace art {
+namespace gc {
+namespace accounting {
+
+template <typename Visitor>
+inline void HeapBitmap::Visit(const Visitor& visitor) {
+ // TODO: C++0x auto
+ typedef std::vector<SpaceBitmap*>::iterator It;
+ for (It it = continuous_space_bitmaps_.begin(), end = continuous_space_bitmaps_.end();
+ it != end; ++it) {
+ SpaceBitmap* bitmap = *it;
+ bitmap->VisitMarkedRange(bitmap->HeapBegin(), bitmap->HeapLimit(), visitor, VoidFunctor());
+ }
+ // TODO: C++0x auto
+ typedef std::vector<SpaceSetMap*>::iterator It2;
+ DCHECK(discontinuous_space_sets_.begin() != discontinuous_space_sets_.end());
+ for (It2 it = discontinuous_space_sets_.begin(), end = discontinuous_space_sets_.end();
+ it != end; ++it) {
+ SpaceSetMap* set = *it;
+ set->Visit(visitor);
+ }
+
+}
+
+} // namespace accounting
+} // namespace gc
+} // namespace art
+
+#endif // ART_SRC_GC_ACCOUNTING_HEAP_BITMAP_INL_H_
diff --git a/src/gc/accounting/heap_bitmap.cc b/src/gc/accounting/heap_bitmap.cc
new file mode 100644
index 0000000..1bdc978
--- /dev/null
+++ b/src/gc/accounting/heap_bitmap.cc
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "heap_bitmap.h"
+
+#include "gc/space/space.h"
+
+namespace art {
+namespace gc {
+namespace accounting {
+
+void HeapBitmap::ReplaceBitmap(SpaceBitmap* old_bitmap, SpaceBitmap* new_bitmap) {
+ // TODO: C++0x auto
+ typedef std::vector<SpaceBitmap*>::iterator It;
+ for (It it = continuous_space_bitmaps_.begin(), end = continuous_space_bitmaps_.end();
+ it != end; ++it) {
+ if (*it == old_bitmap) {
+ *it = new_bitmap;
+ return;
+ }
+ }
+ LOG(FATAL) << "bitmap " << static_cast<const void*>(old_bitmap) << " not found";
+}
+
+void HeapBitmap::ReplaceObjectSet(SpaceSetMap* old_set, SpaceSetMap* new_set) {
+ // TODO: C++0x auto
+ typedef std::vector<SpaceSetMap*>::iterator It;
+ for (It it = discontinuous_space_sets_.begin(), end = discontinuous_space_sets_.end();
+ it != end; ++it) {
+ if (*it == old_set) {
+ *it = new_set;
+ return;
+ }
+ }
+ LOG(FATAL) << "object set " << static_cast<const void*>(old_set) << " not found";
+}
+
+void HeapBitmap::AddContinuousSpaceBitmap(accounting::SpaceBitmap* bitmap) {
+ DCHECK(bitmap != NULL);
+
+ // Check for interval overlap.
+ typedef std::vector<SpaceBitmap*>::iterator It;
+ for (It it = continuous_space_bitmaps_.begin(), end = continuous_space_bitmaps_.end();
+ it != end; ++it) {
+    SpaceBitmap* cur_bitmap = *it;
+    CHECK(bitmap->HeapBegin() >= cur_bitmap->HeapLimit() ||
+          bitmap->HeapLimit() <= cur_bitmap->HeapBegin())
+ << "Bitmap " << bitmap->Dump() << " overlaps with existing bitmap " << cur_bitmap->Dump();
+ }
+ continuous_space_bitmaps_.push_back(bitmap);
+}
+
+void HeapBitmap::AddDiscontinuousObjectSet(SpaceSetMap* set) {
+ DCHECK(set != NULL);
+ discontinuous_space_sets_.push_back(set);
+}
+
+void HeapBitmap::Walk(SpaceBitmap::Callback* callback, void* arg) {
+ // TODO: C++0x auto
+ typedef std::vector<SpaceBitmap*>::iterator It;
+ for (It it = continuous_space_bitmaps_.begin(), end = continuous_space_bitmaps_.end();
+ it != end; ++it) {
+ SpaceBitmap* bitmap = *it;
+ bitmap->Walk(callback, arg);
+ }
+ // TODO: C++0x auto
+ typedef std::vector<SpaceSetMap*>::iterator It2;
+ DCHECK(discontinuous_space_sets_.begin() != discontinuous_space_sets_.end());
+ for (It2 it = discontinuous_space_sets_.begin(), end = discontinuous_space_sets_.end();
+ it != end; ++it) {
+ SpaceSetMap* set = *it;
+ set->Walk(callback, arg);
+ }
+}
+
+} // namespace accounting
+} // namespace gc
+} // namespace art
diff --git a/src/gc/accounting/heap_bitmap.h b/src/gc/accounting/heap_bitmap.h
new file mode 100644
index 0000000..5ff40c6
--- /dev/null
+++ b/src/gc/accounting/heap_bitmap.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_ACCOUNTING_HEAP_BITMAP_H_
+#define ART_SRC_GC_ACCOUNTING_HEAP_BITMAP_H_
+
+#include "base/logging.h"
+#include "locks.h"
+#include "space_bitmap.h"
+
+namespace art {
+namespace gc {
+
+class Heap;
+
+namespace accounting {
+
+class HeapBitmap {
+ public:
+ bool Test(const mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ SpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
+ if (LIKELY(bitmap != NULL)) {
+ return bitmap->Test(obj);
+ } else {
+ return GetDiscontinuousSpaceObjectSet(obj) != NULL;
+ }
+ }
+
+ void Clear(const mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ SpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
+ if (LIKELY(bitmap != NULL)) {
+ bitmap->Clear(obj);
+ } else {
+ SpaceSetMap* set = GetDiscontinuousSpaceObjectSet(obj);
+ DCHECK(set != NULL);
+ set->Clear(obj);
+ }
+ }
+
+ void Set(const mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ SpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
+ if (LIKELY(bitmap != NULL)) {
+ bitmap->Set(obj);
+ } else {
+ SpaceSetMap* set = GetDiscontinuousSpaceObjectSet(obj);
+ DCHECK(set != NULL);
+ set->Set(obj);
+ }
+ }
+
+ SpaceBitmap* GetContinuousSpaceBitmap(const mirror::Object* obj) {
+ // TODO: C++0x auto
+ typedef std::vector<SpaceBitmap*>::iterator It;
+ for (It it = continuous_space_bitmaps_.begin(), end = continuous_space_bitmaps_.end();
+ it != end; ++it) {
+ SpaceBitmap* bitmap = *it;
+ if (bitmap->HasAddress(obj)) {
+ return bitmap;
+ }
+ }
+ return NULL;
+ }
+
+ SpaceSetMap* GetDiscontinuousSpaceObjectSet(const mirror::Object* obj) {
+ // TODO: C++0x auto
+ typedef std::vector<SpaceSetMap*>::iterator It;
+ for (It it = discontinuous_space_sets_.begin(), end = discontinuous_space_sets_.end();
+ it != end; ++it) {
+ SpaceSetMap* set = *it;
+ if (set->Test(obj)) {
+ return set;
+ }
+ }
+ return NULL;
+ }
+
+ void Walk(SpaceBitmap::Callback* callback, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ template <typename Visitor>
+ void Visit(const Visitor& visitor)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Find and replace a bitmap pointer; this is used by the GC for bitmap swapping.
+ void ReplaceBitmap(SpaceBitmap* old_bitmap, SpaceBitmap* new_bitmap)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  // Find and replace an object set pointer; this is used by the GC for bitmap swapping.
+ void ReplaceObjectSet(SpaceSetMap* old_set, SpaceSetMap* new_set)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ HeapBitmap(Heap* heap) : heap_(heap) {
+ }
+
+ private:
+
+ const Heap* const heap_;
+
+ void AddContinuousSpaceBitmap(SpaceBitmap* bitmap);
+ void AddDiscontinuousObjectSet(SpaceSetMap* set);
+
+ // Bitmaps covering continuous spaces.
+ std::vector<SpaceBitmap*> continuous_space_bitmaps_;
+
+ // Sets covering discontinuous spaces.
+ std::vector<SpaceSetMap*> discontinuous_space_sets_;
+
+ friend class art::gc::Heap;
+};
+
+} // namespace accounting
+} // namespace gc
+} // namespace art
+
+#endif // ART_SRC_GC_ACCOUNTING_HEAP_BITMAP_H_
diff --git a/src/gc/accounting/mod_union_table-inl.h b/src/gc/accounting/mod_union_table-inl.h
new file mode 100644
index 0000000..656af94
--- /dev/null
+++ b/src/gc/accounting/mod_union_table-inl.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_MOD_UNION_TABLE_INL_H_
+#define ART_SRC_GC_MOD_UNION_TABLE_INL_H_
+
+#include "mod_union_table.h"
+
+#include "gc/space/space.h"
+
+namespace art {
+namespace gc {
+namespace accounting {
+
+// A mod-union table to record image references to the Zygote and alloc space.
+class ModUnionTableToZygoteAllocspace : public ModUnionTableReferenceCache {
+ public:
+ ModUnionTableToZygoteAllocspace(Heap* heap) : ModUnionTableReferenceCache(heap) {
+ }
+
+ bool AddReference(const mirror::Object* /* obj */, const mirror::Object* ref) {
+ const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
+ typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+ for (It it = spaces.begin(); it != spaces.end(); ++it) {
+ if ((*it)->Contains(ref)) {
+ return (*it)->IsDlMallocSpace();
+ }
+ }
+ // Assume it points to a large object.
+ // TODO: Check.
+ return true;
+ }
+};
+
+// A mod-union table to record Zygote references to the alloc space.
+class ModUnionTableToAllocspace : public ModUnionTableReferenceCache {
+ public:
+ ModUnionTableToAllocspace(Heap* heap) : ModUnionTableReferenceCache(heap) {
+ }
+
+ bool AddReference(const mirror::Object* /* obj */, const mirror::Object* ref) {
+ const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
+ typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+ for (It it = spaces.begin(); it != spaces.end(); ++it) {
+ space::ContinuousSpace* space = *it;
+ if (space->Contains(ref)) {
+        // The allocation space is always considered for collection whereas the Zygote space is
+        // only considered during full collections.
+ return space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect;
+ }
+ }
+ // Assume it points to a large object.
+ // TODO: Check.
+ return true;
+ }
+};
+
+} // namespace accounting
+} // namespace gc
+} // namespace art
+
+#endif // ART_SRC_GC_MOD_UNION_TABLE_INL_H_
diff --git a/src/gc/accounting/mod_union_table.cc b/src/gc/accounting/mod_union_table.cc
new file mode 100644
index 0000000..05b68c4
--- /dev/null
+++ b/src/gc/accounting/mod_union_table.cc
@@ -0,0 +1,396 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mod_union_table.h"
+
+#include "base/stl_util.h"
+#include "card_table-inl.h"
+#include "heap_bitmap.h"
+#include "gc/collector/mark_sweep-inl.h"
+#include "gc/heap.h"
+#include "gc/space/space.h"
+#include "mirror/object-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/field-inl.h"
+#include "mirror/object_array-inl.h"
+#include "space_bitmap-inl.h"
+#include "thread.h"
+#include "UniquePtr.h"
+
+using namespace art::mirror;
+
+namespace art {
+namespace gc {
+namespace accounting {
+
+class MarkIfReachesAllocspaceVisitor {
+ public:
+ explicit MarkIfReachesAllocspaceVisitor(Heap* const heap, accounting::SpaceBitmap* bitmap)
+ : heap_(heap),
+ bitmap_(bitmap) {
+ }
+
+ // Extra parameters are required since we use this same visitor signature for checking objects.
+ void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
+ bool /* is_static */) const {
+ // TODO: Optimize?
+ // TODO: C++0x auto
+ const std::vector<space::ContinuousSpace*>& spaces = heap_->GetContinuousSpaces();
+ typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+ for (It cur = spaces.begin(); cur != spaces.end(); ++cur) {
+ if ((*cur)->IsDlMallocSpace() && (*cur)->Contains(ref)) {
+ bitmap_->Set(obj);
+ break;
+ }
+ }
+ }
+
+ private:
+ Heap* const heap_;
+ accounting::SpaceBitmap* const bitmap_;
+};
+
+class ModUnionVisitor {
+ public:
+ explicit ModUnionVisitor(Heap* const heap, accounting::SpaceBitmap* bitmap)
+ : heap_(heap),
+ bitmap_(bitmap) {
+ }
+
+ void operator ()(const Object* obj) const
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
+ Locks::mutator_lock_) {
+ DCHECK(obj != NULL);
+    // We don't have an early exit since we use the visitor pattern; adding an early exit should
+ // significantly speed this up.
+ MarkIfReachesAllocspaceVisitor visitor(heap_, bitmap_);
+ collector::MarkSweep::VisitObjectReferences(obj, visitor);
+ }
+ private:
+ Heap* const heap_;
+ accounting::SpaceBitmap* const bitmap_;
+};
+
+class ModUnionClearCardSetVisitor {
+ public:
+ explicit ModUnionClearCardSetVisitor(std::set<byte*>* const cleared_cards)
+ : cleared_cards_(cleared_cards) {
+ }
+
+ inline void operator ()(byte* card, byte expected_value, byte new_value) const {
+ if (expected_value == CardTable::kCardDirty) {
+ cleared_cards_->insert(card);
+ }
+ }
+
+ private:
+ std::set<byte*>* const cleared_cards_;
+};
+
+class ModUnionClearCardVisitor {
+ public:
+ explicit ModUnionClearCardVisitor(std::vector<byte*>* cleared_cards)
+ : cleared_cards_(cleared_cards) {
+ }
+
+ void operator ()(byte* card, byte expected_card, byte new_card) const {
+ if (expected_card == CardTable::kCardDirty) {
+ cleared_cards_->push_back(card);
+ }
+ }
+ private:
+ std::vector<byte*>* const cleared_cards_;
+};
+
+class ModUnionScanImageRootVisitor {
+ public:
+ ModUnionScanImageRootVisitor(collector::MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {
+ }
+
+ void operator ()(const Object* root) const
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(root != NULL);
+ mark_sweep_->ScanRoot(root);
+ }
+
+ private:
+ collector::MarkSweep* const mark_sweep_;
+};
+
+void ModUnionTableReferenceCache::ClearCards(space::ContinuousSpace* space) {
+ CardTable* card_table = GetHeap()->GetCardTable();
+ ModUnionClearCardSetVisitor visitor(&cleared_cards_);
+  // Clear dirty cards in this space and update the corresponding mod-union bits.
+ card_table->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), visitor);
+}
+
+class AddToReferenceArrayVisitor {
+ public:
+ explicit AddToReferenceArrayVisitor(ModUnionTableReferenceCache* mod_union_table,
+ std::vector<const mirror::Object*>* references)
+ : mod_union_table_(mod_union_table),
+ references_(references) {
+ }
+
+ // Extra parameters are required since we use this same visitor signature for checking objects.
+ void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
+ bool /* is_static */) const {
+ // Only add the reference if it is non null and fits our criteria.
+ if (ref != NULL && mod_union_table_->AddReference(obj, ref)) {
+ references_->push_back(ref);
+ }
+ }
+
+ private:
+ ModUnionTableReferenceCache* const mod_union_table_;
+ std::vector<const mirror::Object*>* const references_;
+};
+
+class ModUnionReferenceVisitor {
+ public:
+ explicit ModUnionReferenceVisitor(ModUnionTableReferenceCache* const mod_union_table,
+ std::vector<const mirror::Object*>* references)
+ : mod_union_table_(mod_union_table),
+ references_(references) {
+ }
+
+ void operator ()(const Object* obj) const
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+ DCHECK(obj != NULL);
+    // We don't have an early exit since we use the visitor pattern; adding an early
+ // exit should significantly speed this up.
+ AddToReferenceArrayVisitor visitor(mod_union_table_, references_);
+ collector::MarkSweep::VisitObjectReferences(obj, visitor);
+ }
+ private:
+ ModUnionTableReferenceCache* const mod_union_table_;
+ std::vector<const mirror::Object*>* const references_;
+};
+
+class CheckReferenceVisitor {
+ public:
+ explicit CheckReferenceVisitor(ModUnionTableReferenceCache* mod_union_table,
+ const std::set<const Object*>& references)
+ : mod_union_table_(mod_union_table),
+ references_(references) {
+ }
+
+ // Extra parameters are required since we use this same visitor signature for checking objects.
+  // TODO: Fix this when annotalysis works with visitors.
+ void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
+ bool /* is_static */) const
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+ Heap* heap = mod_union_table_->GetHeap();
+ if (ref != NULL && mod_union_table_->AddReference(obj, ref) &&
+ references_.find(ref) == references_.end()) {
+ space::ContinuousSpace* from_space = heap->FindContinuousSpaceFromObject(obj, false);
+ space::ContinuousSpace* to_space = heap->FindContinuousSpaceFromObject(ref, false);
+ LOG(INFO) << "Object " << reinterpret_cast<const void*>(obj) << "(" << PrettyTypeOf(obj) << ")"
+ << "References " << reinterpret_cast<const void*>(ref)
+ << "(" << PrettyTypeOf(ref) << ") without being in mod-union table";
+ LOG(INFO) << "FromSpace " << from_space->GetName() << " type " << from_space->GetGcRetentionPolicy();
+ LOG(INFO) << "ToSpace " << to_space->GetName() << " type " << to_space->GetGcRetentionPolicy();
+ mod_union_table_->GetHeap()->DumpSpaces();
+ LOG(FATAL) << "FATAL ERROR";
+ }
+ }
+
+ private:
+ ModUnionTableReferenceCache* const mod_union_table_;
+ const std::set<const Object*>& references_;
+};
+
+class ModUnionCheckReferences {
+ public:
+ explicit ModUnionCheckReferences (ModUnionTableReferenceCache* mod_union_table,
+ const std::set<const Object*>& references)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ : mod_union_table_(mod_union_table), references_(references) {
+ }
+
+ void operator ()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
+ Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
+ DCHECK(obj != NULL);
+ CheckReferenceVisitor visitor(mod_union_table_, references_);
+ collector::MarkSweep::VisitObjectReferences(obj, visitor);
+ }
+
+ private:
+ ModUnionTableReferenceCache* const mod_union_table_;
+ const std::set<const Object*>& references_;
+};
+
+void ModUnionTableReferenceCache::Verify() {
+ // Start by checking that everything in the mod union table is marked.
+ Heap* heap = GetHeap();
+ typedef SafeMap<const byte*, std::vector<const mirror::Object*> >::const_iterator It;
+ typedef std::vector<const mirror::Object*>::const_iterator It2;
+ for (It it = references_.begin(), end = references_.end(); it != end; ++it) {
+ for (It2 it_ref = it->second.begin(), end_ref = it->second.end(); it_ref != end_ref;
+ ++it_ref ) {
+ CHECK(heap->IsLiveObjectLocked(*it_ref));
+ }
+ }
+
+ // Check the references of each clean card which is also in the mod union table.
+ CardTable* card_table = heap->GetCardTable();
+ for (It it = references_.begin(); it != references_.end(); ++it) {
+ const byte* card = &*it->first;
+ if (*card == CardTable::kCardClean) {
+ std::set<const Object*> reference_set;
+ for (It2 itr = it->second.begin(); itr != it->second.end();++itr) {
+ reference_set.insert(*itr);
+ }
+ ModUnionCheckReferences visitor(this, reference_set);
+ uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
+ uintptr_t end = start + CardTable::kCardSize;
+ space::ContinuousSpace* space =
+ heap->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false);
+ SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ live_bitmap->VisitMarkedRange(start, end, visitor, VoidFunctor());
+ }
+ }
+}
+
+void ModUnionTableReferenceCache::Dump(std::ostream& os) {
+ CardTable* card_table = heap_->GetCardTable();
+ typedef std::set<byte*>::const_iterator It;
+ os << "ModUnionTable cleared cards: [";
+ for (It it = cleared_cards_.begin(); it != cleared_cards_.end(); ++it) {
+ byte* card = *it;
+ uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
+ uintptr_t end = start + CardTable::kCardSize;
+ os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << ",";
+ }
+ os << "]\nModUnionTable references: [";
+ typedef SafeMap<const byte*, std::vector<const mirror::Object*> >::const_iterator It2;
+ for (It2 it = references_.begin(); it != references_.end(); ++it) {
+ const byte* card = &*it->first;
+ uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
+ uintptr_t end = start + CardTable::kCardSize;
+ os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << "->{";
+ typedef std::vector<const mirror::Object*>::const_iterator It3;
+ for (It3 itr = it->second.begin(); itr != it->second.end();++itr) {
+ os << reinterpret_cast<const void*>(*itr) << ",";
+ }
+ os << "},";
+ }
+}
+
+void ModUnionTableReferenceCache::Update() {
+ Heap* heap = GetHeap();
+ CardTable* card_table = heap->GetCardTable();
+
+ std::vector<const mirror::Object*> cards_references;
+ ModUnionReferenceVisitor visitor(this, &cards_references);
+
+ typedef std::set<byte*>::iterator It;
+ for (It it = cleared_cards_.begin(), cc_end = cleared_cards_.end(); it != cc_end; ++it) {
+ byte* card = *it;
+ // Clear and re-compute alloc space references associated with this card.
+ cards_references.clear();
+ uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
+ uintptr_t end = start + CardTable::kCardSize;
+ SpaceBitmap* live_bitmap =
+ heap->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false)->GetLiveBitmap();
+ live_bitmap->VisitMarkedRange(start, end, visitor, VoidFunctor());
+
+ // Update the corresponding references for the card.
+ // TODO: C++0x auto
+ SafeMap<const byte*, std::vector<const mirror::Object*> >::iterator
+ found = references_.find(card);
+ if (found == references_.end()) {
+ if (cards_references.empty()) {
+ // No reason to add empty array.
+ continue;
+ }
+ references_.Put(card, cards_references);
+ } else {
+ found->second = cards_references;
+ }
+ }
+ cleared_cards_.clear();
+}
+
+void ModUnionTableReferenceCache::MarkReferences(collector::MarkSweep* mark_sweep) {
+ // TODO: C++0x auto
+ size_t count = 0;
+
+ typedef SafeMap<const byte*, std::vector<const mirror::Object*> >::const_iterator It;
+ for (It it = references_.begin(); it != references_.end(); ++it) {
+ typedef std::vector<const mirror::Object*>::const_iterator It2;
+ for (It2 it_ref = it->second.begin(); it_ref != it->second.end(); ++it_ref ) {
+ mark_sweep->MarkRoot(*it_ref);
+ ++count;
+ }
+ }
+ if (VLOG_IS_ON(heap)) {
+ VLOG(gc) << "Marked " << count << " references in mod union table";
+ }
+}
+
+void ModUnionTableCardCache::ClearCards(space::ContinuousSpace* space) {
+ CardTable* card_table = GetHeap()->GetCardTable();
+ ModUnionClearCardSetVisitor visitor(&cleared_cards_);
+  // Clear dirty cards in this space and update the corresponding mod-union bits.
+ card_table->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), visitor);
+}
+
+// Mark all references to the alloc space(s).
+void ModUnionTableCardCache::MarkReferences(collector::MarkSweep* mark_sweep) {
+ CardTable* card_table = heap_->GetCardTable();
+ ModUnionScanImageRootVisitor visitor(mark_sweep);
+ typedef std::set<byte*>::const_iterator It;
+ It it = cleared_cards_.begin();
+ It cc_end = cleared_cards_.end();
+ if (it != cc_end) {
+ byte* card = *it;
+ uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
+ uintptr_t end = start + CardTable::kCardSize;
+ space::ContinuousSpace* cur_space =
+ heap_->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false);
+ accounting::SpaceBitmap* cur_live_bitmap = cur_space->GetLiveBitmap();
+ cur_live_bitmap->VisitMarkedRange(start, end, visitor, VoidFunctor());
+ for (++it; it != cc_end; ++it) {
+ card = *it;
+ start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
+ end = start + CardTable::kCardSize;
+ if (UNLIKELY(!cur_space->Contains(reinterpret_cast<Object*>(start)))) {
+ cur_space = heap_->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false);
+ cur_live_bitmap = cur_space->GetLiveBitmap();
+ }
+ cur_live_bitmap->VisitMarkedRange(start, end, visitor, VoidFunctor());
+ }
+ }
+}
+
+void ModUnionTableCardCache::Dump(std::ostream& os) {
+ CardTable* card_table = heap_->GetCardTable();
+ typedef std::set<byte*>::const_iterator It;
+ os << "ModUnionTable dirty cards: [";
+ for (It it = cleared_cards_.begin(); it != cleared_cards_.end(); ++it) {
+ byte* card = *it;
+ uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
+ uintptr_t end = start + CardTable::kCardSize;
+ os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << ",";
+ }
+ os << "]";
+}
+
+} // namespace accounting
+} // namespace gc
+} // namespace art
diff --git a/src/gc/accounting/mod_union_table.h b/src/gc/accounting/mod_union_table.h
new file mode 100644
index 0000000..5d25e05
--- /dev/null
+++ b/src/gc/accounting/mod_union_table.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_ACCOUNTING_MOD_UNION_TABLE_H_
+#define ART_SRC_GC_ACCOUNTING_MOD_UNION_TABLE_H_
+
+#include "globals.h"
+#include "safe_map.h"
+
+#include <set>
+#include <vector>
+
+namespace art {
+namespace mirror {
+ class Object;
+} // namespace mirror
+
+namespace gc {
+
+namespace collector {
+ class MarkSweep;
+} // namespace collector
+namespace space {
+ class ContinuousSpace;
+ class Space;
+} // namespace space
+
+class Heap;
+
+namespace accounting {
+
+class SpaceBitmap;
+class HeapBitmap;
+
+// The mod-union table is the union of modified cards. It is used to allow the card table to be
+// cleared between GC phases, reducing the number of dirty cards that need to be scanned.
+class ModUnionTable {
+ public:
+ ModUnionTable(Heap* heap) : heap_(heap) {
+ }
+
+ virtual ~ModUnionTable() {
+ }
+
+ // Clear cards which map to a memory range of a space. This doesn't immediately update the
+ // mod-union table, as updating the mod-union table may have an associated cost, such as
+ // determining references to track.
+ virtual void ClearCards(space::ContinuousSpace* space) = 0;
+
+ // Update the mod-union table using data stored by ClearCards. There may be multiple ClearCards
+ // before a call to update, for example, back-to-back sticky GCs.
+ virtual void Update() = 0;
+
+ // Mark the bitmaps for all references which are stored in the mod-union table.
+ virtual void MarkReferences(collector::MarkSweep* mark_sweep) = 0;
+
+  // Verification: sanity checks that we don't have clean cards which conflict with our cached data
+ // for said cards. Exclusive lock is required since verify sometimes uses
+ // SpaceBitmap::VisitMarkedRange and VisitMarkedRange can't know if the callback will modify the
+ // bitmap or not.
+ virtual void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) = 0;
+
+ virtual void Dump(std::ostream& os) = 0;
+
+ Heap* GetHeap() const {
+ return heap_;
+ }
+
+ protected:
+ Heap* const heap_;
+};
+
+// Reference caching implementation. Caches references pointing to alloc space(s) for each card.
+class ModUnionTableReferenceCache : public ModUnionTable {
+ public:
+ ModUnionTableReferenceCache(Heap* heap) : ModUnionTable(heap) {}
+ virtual ~ModUnionTableReferenceCache() {}
+
+ // Clear and store cards for a space.
+ void ClearCards(space::ContinuousSpace* space);
+
+ // Update table based on cleared cards.
+ void Update()
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Mark all references to the alloc space(s).
+ void MarkReferences(collector::MarkSweep* mark_sweep)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ // Exclusive lock is required since verify uses SpaceBitmap::VisitMarkedRange and
+ // VisitMarkedRange can't know if the callback will modify the bitmap or not.
+ void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ // Function that tells whether or not to add a reference to the table.
+ virtual bool AddReference(const mirror::Object* obj, const mirror::Object* ref) = 0;
+
+ void Dump(std::ostream& os);
+
+ protected:
+ // Cleared card array, used to update the mod-union table.
+ std::set<byte*> cleared_cards_;
+
+ // Maps from dirty cards to their corresponding alloc space references.
+ SafeMap<const byte*, std::vector<const mirror::Object*> > references_;
+};
+
+// Card caching implementation. Keeps track only of which cards we cleared.
+class ModUnionTableCardCache : public ModUnionTable {
+ public:
+ ModUnionTableCardCache(Heap* heap) : ModUnionTable(heap) {}
+ virtual ~ModUnionTableCardCache() {}
+
+ // Clear and store cards for a space.
+ void ClearCards(space::ContinuousSpace* space);
+
+ // Nothing to update as all dirty cards were placed into cleared cards during clearing.
+ void Update() {}
+
+ // Mark all references to the alloc space(s).
+ void MarkReferences(collector::MarkSweep* mark_sweep)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Nothing to verify.
+ void Verify() {}
+
+ void Dump(std::ostream& os);
+
+ protected:
+ // Cleared card array, used to update the mod-union table.
+ std::set<byte*> cleared_cards_;
+};
+
+} // namespace accounting
+} // namespace gc
+} // namespace art
+
+#endif // ART_SRC_GC_ACCOUNTING_MOD_UNION_TABLE_H_
diff --git a/src/gc/space_bitmap-inl.h b/src/gc/accounting/space_bitmap-inl.h
similarity index 94%
rename from src/gc/space_bitmap-inl.h
rename to src/gc/accounting/space_bitmap-inl.h
index dd91403..a4fd330 100644
--- a/src/gc/space_bitmap-inl.h
+++ b/src/gc/accounting/space_bitmap-inl.h
@@ -14,13 +14,16 @@
* limitations under the License.
*/
-#ifndef ART_SRC_GC_SPACE_BITMAP_INL_H_
-#define ART_SRC_GC_SPACE_BITMAP_INL_H_
+#ifndef ART_SRC_GC_ACCOUNTING_SPACE_BITMAP_INL_H_
+#define ART_SRC_GC_ACCOUNTING_SPACE_BITMAP_INL_H_
#include "base/logging.h"
#include "cutils/atomic-inline.h"
+#include "utils.h"
namespace art {
+namespace gc {
+namespace accounting {
inline bool SpaceBitmap::AtomicTestAndSet(const mirror::Object* obj) {
uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
@@ -136,6 +139,9 @@
}
return (old_word & mask) != 0;
}
+
+} // namespace accounting
+} // namespace gc
} // namespace art
-#endif // ART_SRC_GC_SPACE_BITMAP_INL_H_
+#endif // ART_SRC_GC_ACCOUNTING_SPACE_BITMAP_INL_H_
diff --git a/src/gc/space_bitmap.cc b/src/gc/accounting/space_bitmap.cc
similarity index 96%
rename from src/gc/space_bitmap.cc
rename to src/gc/accounting/space_bitmap.cc
index 773aa1e..19f1128 100644
--- a/src/gc/space_bitmap.cc
+++ b/src/gc/accounting/space_bitmap.cc
@@ -14,19 +14,21 @@
* limitations under the License.
*/
-#include "heap_bitmap.h"
-
#include "base/logging.h"
#include "dex_file-inl.h"
+#include "heap_bitmap.h"
#include "mirror/class-inl.h"
#include "mirror/field-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
+#include "object_utils.h"
#include "space_bitmap-inl.h"
#include "UniquePtr.h"
#include "utils.h"
namespace art {
+namespace gc {
+namespace accounting {
std::string SpaceBitmap::GetName() const {
return name_;
@@ -36,6 +38,12 @@
name_ = name;
}
+std::string SpaceBitmap::Dump() const {
+ return StringPrintf("%s: %p-%p", name_.c_str(),
+ reinterpret_cast<void*>(HeapBegin()),
+ reinterpret_cast<void*>(HeapLimit()));
+}
+
void SpaceSetMap::Walk(SpaceBitmap::Callback* callback, void* arg) {
for (Objects::iterator it = contained_.begin(); it != contained_.end(); ++it) {
callback(const_cast<mirror::Object*>(*it), arg);
@@ -72,8 +80,6 @@
// mem_map_->Trim(reinterpret_cast<byte*>(heap_begin_ + bitmap_size_));
}
-// Fill the bitmap with zeroes. Returns the bitmap's memory to the
-// system as a side-effect.
void SpaceBitmap::Clear() {
if (bitmap_begin_ != NULL) {
// This returns the memory to the system. Successive page faults
@@ -164,14 +170,6 @@
}
}
-} // namespace art
-
-// Support needed for in order traversal
-#include "mirror/object.h"
-#include "object_utils.h"
-
-namespace art {
-
static void WalkFieldsInOrder(SpaceBitmap* visited, SpaceBitmap::Callback* callback, mirror::Object* obj,
void* arg);
@@ -273,10 +271,6 @@
name_ = name;
}
-SpaceSetMap::SpaceSetMap(const std::string& name) : name_(name) {
-
-}
-
void SpaceSetMap::CopyFrom(const SpaceSetMap& space_set) {
contained_ = space_set.contained_;
}
@@ -287,6 +281,8 @@
<< "begin=" << reinterpret_cast<const void*>(bitmap.HeapBegin())
<< ",end=" << reinterpret_cast<const void*>(bitmap.HeapLimit())
<< "]";
- }
+}
+} // namespace accounting
+} // namespace gc
} // namespace art
diff --git a/src/gc/space_bitmap.h b/src/gc/accounting/space_bitmap.h
similarity index 92%
rename from src/gc/space_bitmap.h
rename to src/gc/accounting/space_bitmap.h
index 6bc06d6..bb487d8 100644
--- a/src/gc/space_bitmap.h
+++ b/src/gc/accounting/space_bitmap.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_SRC_GC_SPACE_BITMAP_H_
-#define ART_SRC_GC_SPACE_BITMAP_H_
+#ifndef ART_SRC_GC_ACCOUNTING_SPACE_BITMAP_H_
+#define ART_SRC_GC_ACCOUNTING_SPACE_BITMAP_H_
#include "locks.h"
#include "globals.h"
@@ -28,12 +28,17 @@
#include <vector>
namespace art {
+
namespace mirror {
-class Object;
+ class Object;
} // namespace mirror
+namespace gc {
+namespace accounting {
+
class SpaceBitmap {
public:
+ // Alignment of objects within spaces.
static const size_t kAlignment = 8;
typedef void Callback(mirror::Object* obj, void* arg);
@@ -52,7 +57,7 @@
// <index> is the index of .bits that contains the bit representing
// <offset>.
static size_t OffsetToIndex(size_t offset) {
- return offset / kAlignment / kBitsPerWord;
+ return offset / kAlignment / kBitsPerWord;
}
static uintptr_t IndexToOffset(size_t index) {
@@ -75,6 +80,7 @@
// Returns true if the object was previously marked.
bool AtomicTestAndSet(const mirror::Object* obj);
+ // Fill the bitmap with zeroes. Returns the bitmap's memory to the system as a side-effect.
void Clear();
bool Test(const mirror::Object* obj) const;
@@ -160,6 +166,8 @@
std::string GetName() const;
void SetName(const std::string& name);
+ std::string Dump() const;
+
const void* GetObjectWordAddress(const mirror::Object* obj) const {
uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
const uintptr_t offset = addr - heap_begin_;
@@ -236,7 +244,8 @@
}
}
- SpaceSetMap(const std::string& name);
+ SpaceSetMap(const std::string& name) : name_(name) {}
+ ~SpaceSetMap() {}
Objects& GetObjects() {
return contained_;
@@ -249,6 +258,8 @@
std::ostream& operator << (std::ostream& stream, const SpaceBitmap& bitmap);
+} // namespace accounting
+} // namespace gc
} // namespace art
-#endif // ART_SRC_GC_SPACE_BITMAP_H_
+#endif // ART_SRC_GC_ACCOUNTING_SPACE_BITMAP_H_
diff --git a/src/gc/space_bitmap_test.cc b/src/gc/accounting/space_bitmap_test.cc
similarity index 96%
rename from src/gc/space_bitmap_test.cc
rename to src/gc/accounting/space_bitmap_test.cc
index 4645659..d00d7c2 100644
--- a/src/gc/space_bitmap_test.cc
+++ b/src/gc/accounting/space_bitmap_test.cc
@@ -17,7 +17,6 @@
#include "space_bitmap.h"
#include "common_test.h"
-#include "dlmalloc.h"
#include "globals.h"
#include "space_bitmap-inl.h"
#include "UniquePtr.h"
@@ -25,6 +24,8 @@
#include <stdint.h>
namespace art {
+namespace gc {
+namespace accounting {
class SpaceBitmapTest : public CommonTest {
public:
@@ -87,4 +88,6 @@
}
}
+} // namespace accounting
+} // namespace gc
} // namespace art
diff --git a/src/gc/allocator/dlmalloc.cc b/src/gc/allocator/dlmalloc.cc
new file mode 100644
index 0000000..7584b6e
--- /dev/null
+++ b/src/gc/allocator/dlmalloc.cc
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dlmalloc.h"
+
+#include "base/logging.h"
+
+// ART specific morecore implementation defined in space.cc.
+#define MORECORE(x) art_heap_morecore(m, x)
+extern "C" void* art_heap_morecore(void* m, intptr_t increment);
+
+// Custom heap error handling.
+#define PROCEED_ON_ERROR 0
+static void art_heap_corruption(const char* function);
+static void art_heap_usage_error(const char* function, void* p);
+#define CORRUPTION_ERROR_ACTION(m) art_heap_corruption(__FUNCTION__)
+#define USAGE_ERROR_ACTION(m,p) art_heap_usage_error(__FUNCTION__, p)
+
+// Ugly inclusion of a C file so that ART-specific #defines configure dlmalloc for our use of
+// mspaces (regular dlmalloc is still declared in bionic).
+#pragma GCC diagnostic ignored "-Wempty-body"
+#pragma GCC diagnostic ignored "-Wstrict-aliasing"
+#include "../../../bionic/libc/upstream-dlmalloc/malloc.c"
+#pragma GCC diagnostic warning "-Wstrict-aliasing"
+#pragma GCC diagnostic warning "-Wempty-body"
+
+
+static void art_heap_corruption(const char* function) {
+ LOG(FATAL) << "Corrupt heap detected in: " << function;
+}
+
+static void art_heap_usage_error(const char* function, void* p) {
+ LOG(FATAL) << "Incorrect use of function '" << function << "' argument " << p << " not expected";
+}
+
+#include "globals.h"
+#include "utils.h"
+#include <sys/mman.h>
+
+using namespace art;
+extern "C" void DlmallocMadviseCallback(void* start, void* end, size_t used_bytes, void* arg) {
+ // Is this chunk in use?
+ if (used_bytes != 0) {
+ return;
+ }
+ // Do we have any whole pages to give back?
+ start = reinterpret_cast<void*>(RoundUp(reinterpret_cast<uintptr_t>(start), kPageSize));
+ end = reinterpret_cast<void*>(RoundDown(reinterpret_cast<uintptr_t>(end), kPageSize));
+ if (end > start) {
+ size_t length = reinterpret_cast<byte*>(end) - reinterpret_cast<byte*>(start);
+ int rc = madvise(start, length, MADV_DONTNEED);
+ if (UNLIKELY(rc != 0)) {
+ errno = rc;
+ PLOG(FATAL) << "madvise failed during heap trimming";
+ }
+ size_t* reclaimed = reinterpret_cast<size_t*>(arg);
+ *reclaimed += length;
+ }
+}
diff --git a/src/gc/allocator/dlmalloc.h b/src/gc/allocator/dlmalloc.h
new file mode 100644
index 0000000..6b02a44
--- /dev/null
+++ b/src/gc/allocator/dlmalloc.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_ALLOCATOR_DLMALLOC_H_
+#define ART_SRC_GC_ALLOCATOR_DLMALLOC_H_
+
+// Configure dlmalloc for mspaces.
+#define HAVE_MMAP 0
+#define HAVE_MREMAP 0
+#define HAVE_MORECORE 1
+#define MSPACES 1
+#define NO_MALLINFO 1
+#define ONLY_MSPACES 1
+#define MALLOC_INSPECT_ALL 1
+
+#include "../../bionic/libc/upstream-dlmalloc/malloc.h"
+
+// Define dlmalloc routines from bionic that cannot be included directly because of redefining
+// symbols from the include above.
+extern "C" void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*), void* arg);
+extern "C" int dlmalloc_trim(size_t);
+
+// Callback for dlmalloc_inspect_all or mspace_inspect_all that will madvise(2) unused
+// pages back to the kernel.
+extern "C" void DlmallocMadviseCallback(void* start, void* end, size_t used_bytes, void* /*arg*/);
+
+#endif // ART_SRC_GC_ALLOCATOR_DLMALLOC_H_
diff --git a/src/gc/collector/garbage_collector.cc b/src/gc/collector/garbage_collector.cc
new file mode 100644
index 0000000..7412835
--- /dev/null
+++ b/src/gc/collector/garbage_collector.cc
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "garbage_collector.h"
+
+#include "base/logging.h"
+#include "base/mutex-inl.h"
+#include "gc/accounting/heap_bitmap.h"
+#include "gc/space/large_object_space.h"
+#include "gc/space/space-inl.h"
+#include "thread.h"
+#include "thread_list.h"
+
+namespace art {
+namespace gc {
+namespace collector {
+
+GarbageCollector::GarbageCollector(Heap* heap, const std::string& name)
+ : heap_(heap),
+ name_(name),
+ verbose_(VLOG_IS_ON(heap)),
+ duration_ns_(0),
+ timings_(name_.c_str(), true, verbose_),
+ cumulative_timings_(name) {
+ ResetCumulativeStatistics();
+}
+
+bool GarbageCollector::HandleDirtyObjectsPhase() {
+ DCHECK(IsConcurrent());
+ return true;
+}
+
+void GarbageCollector::RegisterPause(uint64_t nano_length) {
+ pause_times_.push_back(nano_length);
+}
+
+void GarbageCollector::ResetCumulativeStatistics() {
+ cumulative_timings_.Reset();
+ total_time_ns_ = 0;
+ total_paused_time_ns_ = 0;
+ total_freed_objects_ = 0;
+ total_freed_bytes_ = 0;
+}
+
+void GarbageCollector::Run() {
+ Thread* self = Thread::Current();
+ ThreadList* thread_list = Runtime::Current()->GetThreadList();
+
+ uint64_t start_time = NanoTime();
+ pause_times_.clear();
+ duration_ns_ = 0;
+
+ InitializePhase();
+
+ if (!IsConcurrent()) {
+ // Pause is the entire length of the GC.
+ uint64_t pause_start = NanoTime();
+ thread_list->SuspendAll();
+ MarkingPhase();
+ ReclaimPhase();
+ thread_list->ResumeAll();
+ uint64_t pause_end = NanoTime();
+ pause_times_.push_back(pause_end - pause_start);
+ } else {
+ {
+ ReaderMutexLock mu(self, *Locks::mutator_lock_);
+ MarkingPhase();
+ }
+ bool done = false;
+ while (!done) {
+ uint64_t pause_start = NanoTime();
+ thread_list->SuspendAll();
+ done = HandleDirtyObjectsPhase();
+ thread_list->ResumeAll();
+ uint64_t pause_end = NanoTime();
+ pause_times_.push_back(pause_end - pause_start);
+ }
+ {
+ ReaderMutexLock mu(self, *Locks::mutator_lock_);
+ ReclaimPhase();
+ }
+ }
+
+ uint64_t end_time = NanoTime();
+ duration_ns_ = end_time - start_time;
+
+ FinishPhase();
+}
+
+void GarbageCollector::SwapBitmaps() {
+ // Swap the live and mark bitmaps for each alloc space. This is needed since sweep re-swaps
+ // these bitmaps. The bitmap swapping is an optimization so that we do not need to clear the live
+ // bits of dead objects in the live bitmap.
+ const GcType gc_type = GetGcType();
+ const std::vector<space::ContinuousSpace*>& cont_spaces = GetHeap()->GetContinuousSpaces();
+ // TODO: C++0x
+ typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+ for (It it = cont_spaces.begin(), end = cont_spaces.end(); it != end; ++it) {
+ space::ContinuousSpace* space = *it;
+ // We never allocate into zygote spaces.
+ if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect ||
+ (gc_type == kGcTypeFull &&
+ space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
+ accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
+ if (live_bitmap != mark_bitmap) {
+ heap_->GetLiveBitmap()->ReplaceBitmap(live_bitmap, mark_bitmap);
+ heap_->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
+ space->AsDlMallocSpace()->SwapBitmaps();
+ }
+ }
+ }
+ const std::vector<space::DiscontinuousSpace*>& disc_spaces = GetHeap()->GetDiscontinuousSpaces();
+ // TODO: C++0x
+ typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
+ for (It2 it = disc_spaces.begin(), end = disc_spaces.end(); it != end; ++it) {
+ space::LargeObjectSpace* space = down_cast<space::LargeObjectSpace*>(*it);
+ accounting::SpaceSetMap* live_set = space->GetLiveObjects();
+ accounting::SpaceSetMap* mark_set = space->GetMarkObjects();
+ heap_->GetLiveBitmap()->ReplaceObjectSet(live_set, mark_set);
+ heap_->GetMarkBitmap()->ReplaceObjectSet(mark_set, live_set);
+ space->SwapBitmaps();
+ }
+}
+
+} // namespace collector
+} // namespace gc
+} // namespace art
diff --git a/src/gc/garbage_collector.h b/src/gc/collector/garbage_collector.h
similarity index 61%
rename from src/gc/garbage_collector.h
rename to src/gc/collector/garbage_collector.h
index a1014c2..1ab3957 100644
--- a/src/gc/garbage_collector.h
+++ b/src/gc/collector/garbage_collector.h
@@ -17,28 +17,38 @@
#ifndef ART_SRC_GC_GARBAGE_COLLECTOR_H_
#define ART_SRC_GC_GARBAGE_COLLECTOR_H_
+#include "gc_type.h"
#include "locks.h"
+#include "base/timing_logger.h"
#include <stdint.h>
#include <vector>
namespace art {
+namespace gc {
class Heap;
+namespace collector {
+
class GarbageCollector {
public:
// Returns true iff the garbage collector is concurrent.
virtual bool IsConcurrent() const = 0;
- GarbageCollector(Heap* heap);
+ GarbageCollector(Heap* heap, const std::string& name);
+ virtual ~GarbageCollector() { }
- virtual ~GarbageCollector();
+ const char* GetName() const {
+ return name_.c_str();
+ }
+
+ virtual GcType GetGcType() const = 0;
// Run the garbage collector.
void Run();
- Heap* GetHeap() {
+ Heap* GetHeap() const {
return heap_;
}
@@ -48,16 +58,28 @@
}
// Returns how long the GC took to complete in nanoseconds.
- uint64_t GetDuration() const {
- return duration_;
+ uint64_t GetDurationNs() const {
+ return duration_ns_;
}
-
- virtual std::string GetName() const = 0;
-
void RegisterPause(uint64_t nano_length);
+ base::NewTimingLogger& GetTimings() {
+ return timings_;
+ }
+
+ CumulativeLogger& GetCumulativeTimings() {
+ return cumulative_timings_;
+ }
+
+ void ResetCumulativeStatistics();
+
+ // Swap the live and mark bitmaps of spaces that are active for the collector. For partial GC,
+  // this is the allocation space; for full GC we swap the zygote bitmaps too.
+ void SwapBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
protected:
+
// The initial phase. Done without mutators paused.
virtual void InitializePhase() = 0;
@@ -73,11 +95,28 @@
// Called after the GC is finished. Done without mutators paused.
virtual void FinishPhase() = 0;
- Heap* heap_;
+ Heap* const heap_;
+
+ std::string name_;
+
+ const bool verbose_;
+
+ uint64_t duration_ns_;
+ base::NewTimingLogger timings_;
+
+ // Cumulative statistics.
+ uint64_t total_time_ns_;
+ uint64_t total_paused_time_ns_;
+ uint64_t total_freed_objects_;
+ uint64_t total_freed_bytes_;
+
+ CumulativeLogger cumulative_timings_;
+
std::vector<uint64_t> pause_times_;
- uint64_t duration_;
};
+} // namespace collector
+} // namespace gc
} // namespace art
#endif // ART_SRC_GC_GARBAGE_COLLECTOR_H_
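
For orientation, here is a minimal sketch of a collector written against the refactored interface. It is illustrative only: the real implementations are MarkSweep and its subclasses, and the MarkingPhase/ReclaimPhase hooks are assumed from the Run() loop in the deleted src/gc/garbage_collector.cc further down in this change.

// Hypothetical collector, only to show the new constructor, naming and phase hooks.
class NoopCollector : public gc::collector::GarbageCollector {
 public:
  explicit NoopCollector(gc::Heap* heap) : GarbageCollector(heap, "noop collector") {}
  virtual ~NoopCollector() {}

  virtual bool IsConcurrent() const { return false; }
  virtual gc::collector::GcType GetGcType() const { return gc::collector::kGcTypeFull; }

 protected:
  virtual void InitializePhase() {
    timings_.Reset();
    timings_.StartSplit("InitializePhase");
  }
  virtual void MarkingPhase() {}  // Run with mutators suspended for a non-concurrent collector.
  virtual void ReclaimPhase() {}
  virtual void FinishPhase() {}
};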
diff --git a/src/gc/collector/gc_type.cc b/src/gc/collector/gc_type.cc
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/gc/collector/gc_type.cc
diff --git a/src/gc/collector/gc_type.h b/src/gc/collector/gc_type.h
new file mode 100644
index 0000000..bb25bb9
--- /dev/null
+++ b/src/gc/collector/gc_type.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_COLLECTOR_GC_TYPE_H_
+#define ART_SRC_GC_COLLECTOR_GC_TYPE_H_
+
+#include <ostream>
+
+namespace art {
+namespace gc {
+namespace collector {
+
+// The type of collection to be performed. The ordering of the enum matters; it is used to
+// determine which GCs are run first.
+enum GcType {
+ // Placeholder for when no GC has been performed.
+ kGcTypeNone,
+ // Sticky mark bits GC that attempts to only free objects allocated since the last GC.
+ kGcTypeSticky,
+ // Partial GC that marks the application heap but not the Zygote.
+ kGcTypePartial,
+ // Full GC that marks and frees in both the application and Zygote heap.
+ kGcTypeFull,
+ // Number of different GC types.
+ kGcTypeMax,
+};
+std::ostream& operator<<(std::ostream& os, const GcType& policy);
+
+} // namespace collector
+} // namespace gc
+} // namespace art
+
+#endif // ART_SRC_GC_COLLECTOR_GC_TYPE_H_
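
Since the enum is ordered from cheapest to most thorough, callers can escalate by simply stepping through the values. A hedged sketch of that idiom (TryCollect and bytes_needed are hypothetical names, not part of this change):

// Walk GC types from sticky up to (but not including) kGcTypeMax, stopping once a
// collection of that type frees enough memory.
for (size_t i = static_cast<size_t>(gc::collector::kGcTypeSticky);
     i < static_cast<size_t>(gc::collector::kGcTypeMax); ++i) {
  gc::collector::GcType gc_type = static_cast<gc::collector::GcType>(i);
  if (TryCollect(gc_type) >= bytes_needed) {  // Hypothetical helper.
    break;
  }
}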
diff --git a/src/gc/mark_sweep-inl.h b/src/gc/collector/mark_sweep-inl.h
similarity index 97%
rename from src/gc/mark_sweep-inl.h
rename to src/gc/collector/mark_sweep-inl.h
index 7265023..ea9fced 100644
--- a/src/gc/mark_sweep-inl.h
+++ b/src/gc/collector/mark_sweep-inl.h
@@ -17,12 +17,16 @@
#ifndef ART_SRC_GC_MARK_SWEEP_INL_H_
#define ART_SRC_GC_MARK_SWEEP_INL_H_
-#include "heap.h"
+#include "gc/collector/mark_sweep.h"
+
+#include "gc/heap.h"
#include "mirror/class.h"
#include "mirror/field.h"
#include "mirror/object_array.h"
namespace art {
+namespace gc {
+namespace collector {
template <typename MarkVisitor>
inline void MarkSweep::ScanObjectVisit(const mirror::Object* obj, const MarkVisitor& visitor) {
@@ -154,6 +158,8 @@
}
}
+} // namespace collector
+} // namespace gc
} // namespace art
#endif // ART_SRC_GC_MARK_SWEEP_INL_H_
diff --git a/src/gc/mark_sweep.cc b/src/gc/collector/mark_sweep.cc
similarity index 76%
rename from src/gc/mark_sweep.cc
rename to src/gc/collector/mark_sweep.cc
index 14d604a..d54fec6 100644
--- a/src/gc/mark_sweep.cc
+++ b/src/gc/collector/mark_sweep.cc
@@ -25,13 +25,16 @@
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
-#include "card_table.h"
-#include "card_table-inl.h"
-#include "heap.h"
+#include "gc/accounting/card_table-inl.h"
+#include "gc/accounting/heap_bitmap.h"
+#include "gc/accounting/space_bitmap-inl.h"
+#include "gc/heap.h"
+#include "gc/space/image_space.h"
+#include "gc/space/large_object_space.h"
+#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
-#include "large_object_space.h"
#include "monitor.h"
#include "mark_sweep-inl.h"
#include "mirror/class-inl.h"
@@ -43,15 +46,15 @@
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
-#include "space.h"
-#include "space_bitmap-inl.h"
-#include "thread.h"
+#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"
using namespace art::mirror;
namespace art {
+namespace gc {
+namespace collector {
// Performance options.
static const bool kParallelMarkStack = true;
@@ -68,7 +71,6 @@
class SetFingerVisitor {
public:
SetFingerVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {
-
}
void operator ()(void* finger) const {
@@ -79,13 +81,7 @@
MarkSweep* const mark_sweep_;
};
-std::string MarkSweep::GetName() const {
- std::ostringstream ss;
- ss << (IsConcurrent() ? "Concurrent" : "") << GetGcType();
- return ss.str();
-}
-
-void MarkSweep::ImmuneSpace(ContinuousSpace* space) {
+void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) {
// Bind live to mark bitmap if necessary.
if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
BindLiveToMarkBitmap(space);
@@ -97,54 +93,68 @@
SetImmuneRange(reinterpret_cast<Object*>(space->Begin()),
reinterpret_cast<Object*>(space->End()));
} else {
- const Spaces& spaces = GetHeap()->GetSpaces();
- const ContinuousSpace* prev_space = NULL;
- // Find out if the previous space is immune.
- // TODO: C++0x
- for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
- if (*it == space) {
- break;
- }
- prev_space = *it;
+ const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
+ const space::ContinuousSpace* prev_space = NULL;
+ // Find out if the previous space is immune.
+ // TODO: C++0x
+ typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+ for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
+ if (*it == space) {
+ break;
}
+ prev_space = *it;
+ }
- // If previous space was immune, then extend the immune region.
- if (prev_space != NULL &&
- immune_begin_ <= reinterpret_cast<Object*>(prev_space->Begin()) &&
- immune_end_ >= reinterpret_cast<Object*>(prev_space->End())) {
+ // If previous space was immune, then extend the immune region. Relies on continuous spaces
+ // being sorted by Heap::AddContinuousSpace.
+ if (prev_space != NULL &&
+ immune_begin_ <= reinterpret_cast<Object*>(prev_space->Begin()) &&
+ immune_end_ >= reinterpret_cast<Object*>(prev_space->End())) {
immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
}
}
}
-// Bind the live bits to the mark bits of bitmaps based on the gc type.
void MarkSweep::BindBitmaps() {
- Spaces& spaces = GetHeap()->GetSpaces();
+ const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
// Mark all of the spaces we never collect as immune.
- for (Spaces::iterator it = spaces.begin(); it != spaces.end(); ++it) {
- ContinuousSpace* space = *it;
- if (space->GetGcRetentionPolicy() == kGcRetentionPolicyNeverCollect) {
+ typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+ for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
+ space::ContinuousSpace* space = *it;
+ if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
ImmuneSpace(space);
}
}
}
-MarkSweep::MarkSweep(Heap* heap, bool is_concurrent)
- : GarbageCollector(heap),
+MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
+ : GarbageCollector(heap,
+ name_prefix + (name_prefix.empty() ? "" : " ") +
+ (is_concurrent ? "concurrent mark sweep": "mark sweep")),
+ current_mark_bitmap_(NULL),
+ java_lang_Class_(NULL),
+ mark_stack_(NULL),
+ finger_(NULL),
+ immune_begin_(NULL),
+ immune_end_(NULL),
+ soft_reference_list_(NULL),
+ weak_reference_list_(NULL),
+ finalizer_reference_list_(NULL),
+ phantom_reference_list_(NULL),
+ cleared_reference_list_(NULL),
gc_barrier_(new Barrier(0)),
large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
mark_stack_expand_lock_("mark sweep mark stack expand lock"),
is_concurrent_(is_concurrent),
- timings_(GetName(), true),
- cumulative_timings_(GetName()) {
- cumulative_timings_.SetName(GetName());
- ResetCumulativeStatistics();
+ clear_soft_references_(false) {
}
void MarkSweep::InitializePhase() {
+ timings_.Reset();
+ timings_.StartSplit("InitializePhase");
mark_stack_ = GetHeap()->mark_stack_.get();
DCHECK(mark_stack_ != NULL);
finger_ = NULL;
@@ -169,34 +179,31 @@
java_lang_Class_ = Class::GetJavaLangClass();
CHECK(java_lang_Class_ != NULL);
FindDefaultMarkBitmap();
- // Mark any concurrent roots as dirty since we need to scan them at least once during this GC.
- Runtime::Current()->DirtyRoots();
- timings_.Reset();
// Do any pre GC verification.
heap_->PreGcVerification(this);
}
void MarkSweep::ProcessReferences(Thread* self) {
+ timings_.NewSplit("ProcessReferences");
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_,
&finalizer_reference_list_, &phantom_reference_list_);
- timings_.AddSplit("ProcessReferences");
}
bool MarkSweep::HandleDirtyObjectsPhase() {
Thread* self = Thread::Current();
- ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
+ accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
Locks::mutator_lock_->AssertExclusiveHeld(self);
{
+ timings_.NewSplit("ReMarkRoots");
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
// Re-mark root set.
ReMarkRoots();
- timings_.AddSplit("ReMarkRoots");
// Scan dirty objects, this is only required if we are not doing concurrent GC.
- RecursiveMarkDirtyObjects(CardTable::kCardDirty);
+ RecursiveMarkDirtyObjects(accounting::CardTable::kCardDirty);
}
ProcessReferences(self);
@@ -206,15 +213,17 @@
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
// This second sweep makes sure that we don't have any objects in the live stack which point to
// freed objects. These cause problems since their references may be previously freed objects.
- SweepArray(timings_, allocation_stack, false);
+ SweepArray(allocation_stack, false);
} else {
+ timings_.NewSplit("UnMarkAllocStack");
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- // We only sweep over the live stack, and the live stack should not intersect with the
- // allocation stack, so it should be safe to UnMark anything in the allocation stack as live.
+      // The allocation stack contains objects allocated since the start of the GC. These may
+      // have been marked during this GC, which would make them ineligible for reclaiming in the
+      // next sticky GC. Remove them from the mark bitmaps so that they remain eligible for
+      // sticky collection.
heap_->UnMarkAllocStack(GetHeap()->alloc_space_->GetMarkBitmap(),
- GetHeap()->large_object_space_->GetMarkObjects(),
- allocation_stack);
- timings_.AddSplit("UnMarkAllocStack");
+ GetHeap()->large_object_space_->GetMarkObjects(),
+ allocation_stack);
}
return true;
}
@@ -227,31 +236,30 @@
Heap* heap = GetHeap();
Thread* self = Thread::Current();
+ timings_.NewSplit("BindBitmaps");
BindBitmaps();
FindDefaultMarkBitmap();
- timings_.AddSplit("BindBitmaps");
-
// Process dirty cards and add dirty cards to mod union tables.
heap->ProcessCards(timings_);
// Need to do this before the checkpoint since we don't want any threads to add references to
// the live stack during the recursive mark.
+ timings_.NewSplit("SwapStacks");
heap->SwapStacks();
- timings_.AddSplit("SwapStacks");
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
// If we exclusively hold the mutator lock, all threads must be suspended.
+ timings_.NewSplit("MarkRoots");
MarkRoots();
- timings_.AddSplit("MarkConcurrentRoots");
} else {
- MarkRootsCheckpoint();
- timings_.AddSplit("MarkRootsCheckpoint");
+ timings_.NewSplit("MarkRootsCheckpoint");
+ MarkRootsCheckpoint(self);
+ timings_.NewSplit("MarkNonThreadRoots");
MarkNonThreadRoots();
- timings_.AddSplit("MarkNonThreadRoots");
}
+ timings_.NewSplit("MarkConcurrentRoots");
MarkConcurrentRoots();
- timings_.AddSplit("MarkConcurrentRoots");
heap->UpdateAndMarkModUnion(this, timings_, GetGcType());
MarkReachableObjects();
@@ -260,12 +268,12 @@
void MarkSweep::MarkReachableObjects() {
  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
// knowing that new allocations won't be marked as live.
- ObjectStack* live_stack = heap_->GetLiveStack();
+ timings_.NewSplit("MarkStackAsLive");
+ accounting::ObjectStack* live_stack = heap_->GetLiveStack();
heap_->MarkAllocStack(heap_->alloc_space_->GetLiveBitmap(),
heap_->large_object_space_->GetLiveObjects(),
live_stack);
live_stack->Reset();
- timings_.AddSplit("MarkStackAsLive");
// Recursively mark all the non-image bits set in the mark bitmap.
RecursiveMark();
DisableFinger();
@@ -289,60 +297,31 @@
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
// Reclaim unmarked objects.
- Sweep(timings_, false);
+ Sweep(false);
    // Swap the live and mark bitmaps for each space that we modified. This is an
// optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
// bitmaps.
+ timings_.NewSplit("SwapBitmaps");
SwapBitmaps();
- timings_.AddSplit("SwapBitmaps");
// Unbind the live and mark bitmaps.
UnBindBitmaps();
}
}
-void MarkSweep::SwapBitmaps() {
- // Swap the live and mark bitmaps for each alloc space. This is needed since sweep re-swaps
- // these bitmaps. The bitmap swapping is an optimization so that we do not need to clear the live
- // bits of dead objects in the live bitmap.
- const GcType gc_type = GetGcType();
- // TODO: C++0x
- Spaces& spaces = heap_->GetSpaces();
- for (Spaces::iterator it = spaces.begin(); it != spaces.end(); ++it) {
- ContinuousSpace* space = *it;
- // We never allocate into zygote spaces.
- if (space->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect ||
- (gc_type == kGcTypeFull &&
- space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect)) {
- SpaceBitmap* live_bitmap = space->GetLiveBitmap();
- SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
- if (live_bitmap != mark_bitmap) {
- heap_->GetLiveBitmap()->ReplaceBitmap(live_bitmap, mark_bitmap);
- heap_->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
- space->AsAllocSpace()->SwapBitmaps();
- }
- }
- }
- SwapLargeObjects();
-}
-
-void MarkSweep::SwapLargeObjects() {
- LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace();
- large_object_space->SwapBitmaps();
- heap_->GetLiveBitmap()->SetLargeObjects(large_object_space->GetLiveObjects());
- heap_->GetMarkBitmap()->SetLargeObjects(large_object_space->GetMarkObjects());
-}
-
void MarkSweep::SetImmuneRange(Object* begin, Object* end) {
immune_begin_ = begin;
immune_end_ = end;
}
void MarkSweep::FindDefaultMarkBitmap() {
- const Spaces& spaces = heap_->GetSpaces();
- for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
- if ((*it)->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect) {
+ const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
+ // TODO: C++0x
+ typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+ for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
+ space::ContinuousSpace* space = *it;
+ if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
current_mark_bitmap_ = (*it)->GetMarkBitmap();
CHECK(current_mark_bitmap_ != NULL);
return;
@@ -389,10 +368,10 @@
// Try to take advantage of locality of references within a space, failing this find the space
// the hard way.
- SpaceBitmap* object_bitmap = current_mark_bitmap_;
+ accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
- SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetSpaceBitmap(obj);
- if (new_bitmap != NULL) {
+ accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
+ if (LIKELY(new_bitmap != NULL)) {
object_bitmap = new_bitmap;
} else {
MarkLargeObject(obj);
@@ -416,8 +395,9 @@
// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool MarkSweep::MarkLargeObject(const Object* obj) {
- LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
- SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
+ // TODO: support >1 discontinuous space.
+ space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
+ accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
if (kProfileLargeObjects) {
++large_object_test_;
}
@@ -450,9 +430,9 @@
// Try to take advantage of locality of references within a space, failing this find the space
// the hard way.
- SpaceBitmap* object_bitmap = current_mark_bitmap_;
+ accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
- SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetSpaceBitmap(obj);
+ accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
if (new_bitmap != NULL) {
object_bitmap = new_bitmap;
} else {
@@ -512,8 +492,8 @@
void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) {
// See if the root is on any space bitmap.
- if (GetHeap()->GetLiveBitmap()->GetSpaceBitmap(root) == NULL) {
- LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
+ if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == NULL) {
+ space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
if (!large_object_space->Contains(root)) {
LOG(ERROR) << "Found invalid root: " << root;
if (visitor != NULL) {
@@ -537,7 +517,8 @@
}
void MarkSweep::MarkConcurrentRoots() {
- Runtime::Current()->VisitConcurrentRoots(MarkObjectCallback, this);
+ // Visit all runtime roots and clear dirty flags.
+ Runtime::Current()->VisitConcurrentRoots(MarkObjectCallback, this, false, true);
}
class CheckObjectVisitor {
@@ -573,11 +554,11 @@
mark_sweep->CheckObject(root);
}
-void MarkSweep::BindLiveToMarkBitmap(ContinuousSpace* space) {
- CHECK(space->IsAllocSpace());
- DlMallocSpace* alloc_space = space->AsAllocSpace();
- SpaceBitmap* live_bitmap = space->GetLiveBitmap();
- SpaceBitmap* mark_bitmap = alloc_space->mark_bitmap_.release();
+void MarkSweep::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
+ CHECK(space->IsDlMallocSpace());
+ space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
+ accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ accounting::SpaceBitmap* mark_bitmap = alloc_space->mark_bitmap_.release();
GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
alloc_space->temp_bitmap_.reset(mark_bitmap);
alloc_space->mark_bitmap_.reset(live_bitmap);
@@ -586,7 +567,6 @@
class ScanObjectVisitor {
public:
ScanObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {
-
}
  // TODO: Fix this when annotalysis works with visitors.
@@ -603,29 +583,39 @@
};
void MarkSweep::ScanGrayObjects(byte minimum_age) {
- const Spaces& spaces = heap_->GetSpaces();
- CardTable* card_table = heap_->GetCardTable();
+ accounting::CardTable* card_table = GetHeap()->GetCardTable();
+ const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
ScanObjectVisitor visitor(this);
SetFingerVisitor finger_visitor(this);
- // TODO: C++ 0x auto
- for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
- ContinuousSpace* space = *it;
+ // TODO: C++0x
+ typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+ for (It it = spaces.begin(), space_end = spaces.end(); it != space_end; ++it) {
+ space::ContinuousSpace* space = *it;
+ switch (space->GetGcRetentionPolicy()) {
+ case space::kGcRetentionPolicyNeverCollect:
+ timings_.NewSplit("ScanGrayImageSpaceObjects");
+ break;
+ case space::kGcRetentionPolicyFullCollect:
+ timings_.NewSplit("ScanGrayZygoteSpaceObjects");
+ break;
+ case space::kGcRetentionPolicyAlwaysCollect:
+ timings_.NewSplit("ScanGrayAllocSpaceObjects");
+ break;
+ }
byte* begin = space->Begin();
byte* end = space->End();
// Image spaces are handled properly since live == marked for them.
- SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
- card_table->Scan(mark_bitmap, begin, end, visitor, VoidFunctor(), minimum_age);
+ accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
+ card_table->Scan(mark_bitmap, begin, end, visitor, finger_visitor, minimum_age);
}
}
class CheckBitmapVisitor {
public:
CheckBitmapVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {
-
}
- void operator ()(const Object* obj) const
- NO_THREAD_SAFETY_ANALYSIS {
+ void operator ()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
if (kDebugLocking) {
Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
}
@@ -642,13 +632,15 @@
// objects which are either in the image space or marked objects in the alloc
// space
CheckBitmapVisitor visitor(this);
- const Spaces& spaces = heap_->GetSpaces();
- for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
+ const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
+ // TODO: C++0x
+ typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+ for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
if ((*it)->IsImageSpace()) {
- ImageSpace* space = (*it)->AsImageSpace();
+ space::ImageSpace* space = (*it)->AsImageSpace();
uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
- SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
DCHECK(live_bitmap != NULL);
live_bitmap->VisitMarkedRange(begin, end, visitor, VoidFunctor());
}
@@ -658,6 +650,7 @@
// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
+ timings_.NewSplit("RecursiveMark");
// RecursiveMark will build the lists of known instances of the Reference classes.
// See DelayReferenceReferent for details.
CHECK(soft_reference_list_ == NULL);
@@ -667,16 +660,17 @@
CHECK(cleared_reference_list_ == NULL);
const bool partial = GetGcType() == kGcTypePartial;
- const Spaces& spaces = heap_->GetSpaces();
SetFingerVisitor set_finger_visitor(this);
ScanObjectVisitor scan_visitor(this);
if (!kDisableFinger) {
finger_ = NULL;
- for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
- ContinuousSpace* space = *it;
- if ((space->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect) ||
- (!partial && space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect)
- ) {
+ const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
+ // TODO: C++0x
+ typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+ for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
+ space::ContinuousSpace* space = *it;
+ if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
+ (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
current_mark_bitmap_ = space->GetMarkBitmap();
if (current_mark_bitmap_ == NULL) {
GetHeap()->DumpSpaces();
@@ -690,9 +684,8 @@
}
}
DisableFinger();
- timings_.AddSplit("RecursiveMark");
+ timings_.NewSplit("ProcessMarkStack");
ProcessMarkStack();
- timings_.AddSplit("ProcessMarkStack");
}
bool MarkSweep::IsMarkedCallback(const Object* object, void* arg) {
@@ -703,13 +696,12 @@
void MarkSweep::RecursiveMarkDirtyObjects(byte minimum_age) {
ScanGrayObjects(minimum_age);
- timings_.AddSplit("ScanGrayObjects");
+ timings_.NewSplit("ProcessMarkStack");
ProcessMarkStack();
- timings_.AddSplit("ProcessMarkStack");
}
void MarkSweep::ReMarkRoots() {
- Runtime::Current()->VisitRoots(ReMarkObjectVisitor, this);
+ Runtime::Current()->VisitRoots(ReMarkObjectVisitor, this, true, true);
}
void MarkSweep::SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg) {
@@ -726,7 +718,7 @@
}
struct ArrayMarkedCheck {
- ObjectStack* live_stack;
+ accounting::ObjectStack* live_stack;
MarkSweep* mark_sweep;
};
@@ -736,11 +728,11 @@
if (array_check->mark_sweep->IsMarked(object)) {
return true;
}
- ObjectStack* live_stack = array_check->live_stack;
+ accounting::ObjectStack* live_stack = array_check->live_stack;
return std::find(live_stack->Begin(), live_stack->End(), object) == live_stack->End();
}
-void MarkSweep::SweepSystemWeaksArray(ObjectStack* allocations) {
+void MarkSweep::SweepSystemWeaksArray(accounting::ObjectStack* allocations) {
Runtime* runtime = Runtime::Current();
// The callbacks check
// !is_marked where is_marked is the callback but we want
@@ -777,7 +769,7 @@
void MarkSweep::VerifyIsLive(const Object* obj) {
Heap* heap = GetHeap();
if (!heap->GetLiveBitmap()->Test(obj)) {
- LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
+ space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
if (!large_object_space->GetLiveObjects()->Test(obj)) {
if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
heap->allocation_stack_->End()) {
@@ -807,7 +799,7 @@
struct SweepCallbackContext {
MarkSweep* mark_sweep;
- AllocSpace* space;
+ space::AllocSpace* space;
Thread* self;
};
@@ -830,28 +822,29 @@
MarkSweep* mark_sweep_;
};
-void MarkSweep::ResetCumulativeStatistics() {
- cumulative_timings_.Reset();
- total_time_ = 0;
- total_paused_time_ = 0;
- total_freed_objects_ = 0;
- total_freed_bytes_ = 0;
-}
-
-void MarkSweep::MarkRootsCheckpoint() {
+void MarkSweep::MarkRootsCheckpoint(Thread* self) {
CheckpointMarkThreadRoots check_point(this);
ThreadList* thread_list = Runtime::Current()->GetThreadList();
- // Increment the count of the barrier. If all of the checkpoints have already been finished then
- // will hit 0 and continue. Otherwise we are still waiting for some checkpoints, so the counter
- // will go positive and we will unblock when it hits zero.
- gc_barrier_->Increment(Thread::Current(), thread_list->RunCheckpoint(&check_point));
+  // Request that the checkpoint be run on all threads, returning a count of the threads that
+  // must run through the barrier (including self).
+ size_t barrier_count = thread_list->RunCheckpoint(&check_point);
+ // Release locks then wait for all mutator threads to pass the barrier.
+ // TODO: optimize to not release locks when there are no threads to wait for.
+ Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
+ Locks::mutator_lock_->SharedUnlock(self);
+ ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun);
+ CHECK_EQ(old_state, kWaitingPerformingGc);
+ gc_barrier_->Increment(self, barrier_count);
+ self->SetState(kWaitingPerformingGc);
+ Locks::mutator_lock_->SharedLock(self);
+ Locks::heap_bitmap_lock_->ExclusiveLock(self);
}
void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
MarkSweep* mark_sweep = context->mark_sweep;
Heap* heap = mark_sweep->GetHeap();
- AllocSpace* space = context->space;
+ space::AllocSpace* space = context->space;
Thread* self = context->self;
Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
// Use a bulk free, that merges consecutive objects before freeing or free per object?
@@ -877,22 +870,23 @@
}
}
-void MarkSweep::SweepArray(TimingLogger& logger, ObjectStack* allocations, bool swap_bitmaps) {
+void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
size_t freed_bytes = 0;
- DlMallocSpace* space = heap_->GetAllocSpace();
+ space::DlMallocSpace* space = heap_->GetAllocSpace();
// If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark
// bitmap, resulting in occasional frees of Weaks which are still in use.
+ timings_.NewSplit("SweepSystemWeaks");
SweepSystemWeaksArray(allocations);
- logger.AddSplit("SweepSystemWeaks");
+ timings_.NewSplit("Process allocation stack");
// Newly allocated objects MUST be in the alloc space and those are the only objects which we are
// going to free.
- SpaceBitmap* live_bitmap = space->GetLiveBitmap();
- SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
- LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
- SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
- SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
+ accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
+ space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
+ accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
+ accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
if (swap_bitmaps) {
std::swap(live_bitmap, mark_bitmap);
std::swap(large_live_objects, large_mark_objects);
@@ -918,7 +912,8 @@
freed_bytes += large_object_space->Free(self, obj);
}
}
- logger.AddSplit("Process allocation stack");
+ CHECK_EQ(count, allocations->Size());
+ timings_.NewSplit("FreeList");
size_t freed_objects = out - objects;
freed_bytes += space->FreeList(self, freed_objects, objects);
@@ -927,71 +922,78 @@
heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes);
freed_objects_ += freed_objects;
freed_bytes_ += freed_bytes;
- logger.AddSplit("FreeList");
+
+ timings_.NewSplit("ResetStack");
allocations->Reset();
- logger.AddSplit("ResetStack");
}
-void MarkSweep::Sweep(TimingLogger& timings, bool swap_bitmaps) {
+void MarkSweep::Sweep(bool swap_bitmaps) {
DCHECK(mark_stack_->IsEmpty());
// If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark
// bitmap, resulting in occasional frees of Weaks which are still in use.
+ timings_.NewSplit("SweepSystemWeaks");
SweepSystemWeaks();
- timings.AddSplit("SweepSystemWeaks");
- const bool partial = GetGcType() == kGcTypePartial;
- const Spaces& spaces = heap_->GetSpaces();
+ const bool partial = (GetGcType() == kGcTypePartial);
SweepCallbackContext scc;
scc.mark_sweep = this;
scc.self = Thread::Current();
- // TODO: C++0x auto
- for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
- ContinuousSpace* space = *it;
- if (
- space->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect ||
- (!partial && space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect)
- ) {
+ const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
+ // TODO: C++0x
+ typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+ for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
+ space::ContinuousSpace* space = *it;
+    // Spaces with an always-collect retention policy are always swept.
+ bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect);
+ if (!partial && !sweep_space) {
+      // We sweep full-collect spaces when the GC isn't a partial GC (ie it's a full GC).
+ sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
+ }
+ if (sweep_space) {
uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
- scc.space = space->AsAllocSpace();
- SpaceBitmap* live_bitmap = space->GetLiveBitmap();
- SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
+ scc.space = space->AsDlMallocSpace();
+ accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
if (swap_bitmaps) {
std::swap(live_bitmap, mark_bitmap);
}
- if (space->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect) {
+ if (!space->IsZygoteSpace()) {
+ timings_.NewSplit("SweepAllocSpace");
// Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
- SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
- &SweepCallback, reinterpret_cast<void*>(&scc));
+ accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
+ &SweepCallback, reinterpret_cast<void*>(&scc));
} else {
- // Zygote sweep takes care of dirtying cards and clearing live bits, does not free actual memory.
- SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
- &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
+ timings_.NewSplit("SweepZygote");
+ // Zygote sweep takes care of dirtying cards and clearing live bits, does not free actual
+ // memory.
+ accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
+ &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
}
}
}
- timings.AddSplit("Sweep");
+ timings_.NewSplit("SweepLargeObjects");
SweepLargeObjects(swap_bitmaps);
- timings.AddSplit("SweepLargeObjects");
}
void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
// Sweep large objects
- LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
- SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
- SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
+ space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
+ accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
+ accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
if (swap_bitmaps) {
std::swap(large_live_objects, large_mark_objects);
}
- SpaceSetMap::Objects& live_objects = large_live_objects->GetObjects();
+ accounting::SpaceSetMap::Objects& live_objects = large_live_objects->GetObjects();
// O(n*log(n)) but hopefully there are not too many large objects.
size_t freed_objects = 0;
size_t freed_bytes = 0;
- // TODO: C++0x
Thread* self = Thread::Current();
- for (SpaceSetMap::Objects::iterator it = live_objects.begin(); it != live_objects.end(); ++it) {
+ // TODO: C++0x
+ typedef accounting::SpaceSetMap::Objects::iterator It;
+ for (It it = live_objects.begin(), end = live_objects.end(); it != end; ++it) {
if (!large_mark_objects->Test(*it)) {
freed_bytes += large_object_space->Free(self, const_cast<Object*>(*it));
++freed_objects;
@@ -999,20 +1001,21 @@
}
freed_objects_ += freed_objects;
freed_bytes_ += freed_bytes;
- // Large objects don't count towards bytes_allocated.
GetHeap()->RecordFree(freed_objects, freed_bytes);
}
void MarkSweep::CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) {
- const Spaces& spaces = heap_->GetSpaces();
- // TODO: C++0x auto
- for (Spaces::const_iterator cur = spaces.begin(); cur != spaces.end(); ++cur) {
- if ((*cur)->IsAllocSpace() && (*cur)->Contains(ref)) {
+ const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
+ // TODO: C++0x
+ typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+ for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
+ space::ContinuousSpace* space = *it;
+ if (space->IsDlMallocSpace() && space->Contains(ref)) {
DCHECK(IsMarked(obj));
bool is_marked = IsMarked(ref);
if (!is_marked) {
- LOG(INFO) << **cur;
+ LOG(INFO) << *space;
LOG(WARNING) << (is_static ? "Static ref'" : "Instance ref'") << PrettyTypeOf(ref)
<< "' (" << reinterpret_cast<const void*>(ref) << ") in '" << PrettyTypeOf(obj)
<< "' (" << reinterpret_cast<const void*>(obj) << ") at offset "
@@ -1109,7 +1112,7 @@
}
class MarkStackChunk : public Task {
-public:
+ public:
MarkStackChunk(ThreadPool* thread_pool, MarkSweep* mark_sweep, Object** begin, Object** end)
: mark_sweep_(mark_sweep),
thread_pool_(thread_pool),
@@ -1171,6 +1174,7 @@
  // Don't need to use atomic ++ since only one thread is writing to an output block at any
// given time.
void Push(Object* obj) {
+ CHECK(obj != NULL);
data_[length_++] = obj;
}
@@ -1178,7 +1182,7 @@
if (static_cast<size_t>(length_) < max_size) {
Push(const_cast<Object*>(obj));
} else {
- // Internal buffer is full, push to a new buffer instead.
+ // Internal (thread-local) buffer is full, push to a new buffer instead.
if (UNLIKELY(output_ == NULL)) {
AllocateOutputChunk();
} else if (UNLIKELY(static_cast<size_t>(output_->length_) == max_size)) {
@@ -1257,8 +1261,8 @@
thread_pool->AddTask(self, new MarkStackChunk(thread_pool, this, begin, end));
}
thread_pool->StartWorkers(self);
+ thread_pool->Wait(self, true, true);
mark_stack_->Reset();
- thread_pool->Wait(self, true);
//LOG(INFO) << "Idle wait time " << PrettyDuration(thread_pool->GetWaitTime());
CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
}
@@ -1445,15 +1449,16 @@
}
void MarkSweep::UnBindBitmaps() {
- const Spaces& spaces = heap_->GetSpaces();
- // TODO: C++0x auto
- for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
- Space* space = *it;
- if (space->IsAllocSpace()) {
- DlMallocSpace* alloc_space = space->AsAllocSpace();
+ const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
+ // TODO: C++0x
+ typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+ for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
+ space::ContinuousSpace* space = *it;
+ if (space->IsDlMallocSpace()) {
+ space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
if (alloc_space->temp_bitmap_.get() != NULL) {
// At this point, the temp_bitmap holds our old mark bitmap.
- SpaceBitmap* new_bitmap = alloc_space->temp_bitmap_.release();
+ accounting::SpaceBitmap* new_bitmap = alloc_space->temp_bitmap_.release();
GetHeap()->GetMarkBitmap()->ReplaceBitmap(alloc_space->mark_bitmap_.get(), new_bitmap);
CHECK_EQ(alloc_space->mark_bitmap_.release(), alloc_space->live_bitmap_.get());
alloc_space->mark_bitmap_.reset(new_bitmap);
@@ -1466,20 +1471,21 @@
void MarkSweep::FinishPhase() {
  // Can't enqueue references if we hold the mutator lock.
Object* cleared_references = GetClearedReferences();
- heap_->EnqueueClearedReferences(&cleared_references);
+ Heap* heap = GetHeap();
+ heap->EnqueueClearedReferences(&cleared_references);
- heap_->PostGcVerification(this);
+ heap->PostGcVerification(this);
- heap_->GrowForUtilization(GetDuration());
- timings_.AddSplit("GrowForUtilization");
+ timings_.NewSplit("GrowForUtilization");
+ heap->GrowForUtilization(GetDurationNs());
- heap_->RequestHeapTrim();
- timings_.AddSplit("RequestHeapTrim");
+ timings_.NewSplit("RequestHeapTrim");
+ heap->RequestHeapTrim();
// Update the cumulative statistics
- total_time_ += GetDuration();
- total_paused_time_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(), 0,
- std::plus<uint64_t>());
+ total_time_ns_ += GetDurationNs();
+ total_paused_time_ns_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(), 0,
+ std::plus<uint64_t>());
total_freed_objects_ += GetFreedObjects();
total_freed_bytes_ += GetFreedBytes();
@@ -1513,27 +1519,26 @@
// Update the cumulative loggers.
cumulative_timings_.Start();
- cumulative_timings_.AddLogger(timings_);
+ cumulative_timings_.AddNewLogger(timings_);
cumulative_timings_.End();
// Clear all of the spaces' mark bitmaps.
- const Spaces& spaces = heap_->GetSpaces();
- // TODO: C++0x auto
- for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
- ContinuousSpace* space = *it;
- if (space->GetGcRetentionPolicy() != kGcRetentionPolicyNeverCollect) {
+ const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
+ // TODO: C++0x
+ typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+ for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
+ space::ContinuousSpace* space = *it;
+ if (space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
space->GetMarkBitmap()->Clear();
}
}
mark_stack_->Reset();
// Reset the marked large objects.
- LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
+ space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
large_objects->GetMarkObjects()->Clear();
}
-MarkSweep::~MarkSweep() {
-
-}
-
+} // namespace collector
+} // namespace gc
} // namespace art
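
One recurring change in this file is the move from AddSplit, which labels a timing split after the work is done, to NewSplit, which labels it up front. A minimal sketch of the difference, assuming the NewTimingLogger behaviour used above (the Do* helpers are placeholders):

// Old pattern: the split name is only recorded once the work has finished.
DoMarkRoots();
timings_.AddSplit("MarkRoots");

// New pattern: the name is recorded when the phase starts, so a trace sample taken
// mid-phase shows which split is currently running.
timings_.NewSplit("MarkRoots");
DoMarkRoots();
timings_.NewSplit("ProcessMarkStack");  // Starts the next split, ending the previous one.
DoProcessMarkStack();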
diff --git a/src/gc/mark_sweep.h b/src/gc/collector/mark_sweep.h
similarity index 86%
rename from src/gc/mark_sweep.h
rename to src/gc/collector/mark_sweep.h
index 11ce32f..9df3c19 100644
--- a/src/gc/mark_sweep.h
+++ b/src/gc/collector/mark_sweep.h
@@ -21,40 +21,50 @@
#include "barrier.h"
#include "base/macros.h"
#include "base/mutex.h"
-#include "base/timing_logger.h"
#include "garbage_collector.h"
-#include "gc_type.h"
#include "offsets.h"
#include "root_visitor.h"
#include "UniquePtr.h"
namespace art {
+
namespace mirror {
-class Class;
-class Object;
-template<class T> class ObjectArray;
-}
-template <typename T> class AtomicStack;
-class CheckObjectVisitor;
-class ContinuousSpace;
-class Heap;
-class MarkIfReachesAllocspaceVisitor;
-class ModUnionClearCardVisitor;
-class ModUnionVisitor;
-class ModUnionTableBitmap;
-typedef AtomicStack<mirror::Object*> ObjectStack;
-class SpaceBitmap;
+ class Class;
+ class Object;
+ template<class T> class ObjectArray;
+} // namespace mirror
+
class StackVisitor;
class Thread;
-class MarkStackChunk;
+
+namespace gc {
+
+namespace accounting {
+ template <typename T> class AtomicStack;
+ class MarkIfReachesAllocspaceVisitor;
+ class ModUnionClearCardVisitor;
+ class ModUnionVisitor;
+ class ModUnionTableBitmap;
+ class MarkStackChunk;
+ typedef AtomicStack<mirror::Object*> ObjectStack;
+ class SpaceBitmap;
+} // namespace accounting
+
+namespace space {
+ class ContinuousSpace;
+} // namespace space
+
+class CheckObjectVisitor;
+class Heap;
+
+namespace collector {
class MarkSweep : public GarbageCollector {
public:
- explicit MarkSweep(Heap* heap, bool is_concurrent);
+ explicit MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = "");
- ~MarkSweep();
+ ~MarkSweep() {}
- virtual std::string GetName() const;
virtual void InitializePhase();
virtual bool IsConcurrent() const;
virtual bool HandleDirtyObjectsPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -85,8 +95,9 @@
void MarkConcurrentRoots();
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- void MarkRootsCheckpoint();
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ void MarkRootsCheckpoint(Thread* self)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Verify that image roots point to only marked objects within the alloc space.
void VerifyImageRoots()
@@ -98,16 +109,17 @@
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Make a space immune, immune spaces are assumed to have all live objects marked.
- void ImmuneSpace(ContinuousSpace* space)
+  // Make a space immune; immune spaces have all live objects marked - that is, the mark and
+  // live bitmaps are bound together.
+ void ImmuneSpace(space::ContinuousSpace* space)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Bind the live bits to the mark bits of bitmaps based on the gc type.
- virtual void BindBitmaps()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, ie
+ // the image. Mark that portion of the heap as immune.
+ virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void BindLiveToMarkBitmap(ContinuousSpace* space)
+ void BindLiveToMarkBitmap(space::ContinuousSpace* space)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
void UnBindBitmaps()
@@ -127,21 +139,15 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Sweeps unmarked objects to complete the garbage collection.
- virtual void Sweep(TimingLogger& timings, bool swap_bitmaps)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Sweeps unmarked objects to complete the garbage collection.
- void SweepLargeObjects(bool swap_bitmaps)
- EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_);
+ void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Sweep only pointers within an array. WARNING: Trashes objects.
- void SweepArray(TimingLogger& logger, ObjectStack* allocation_stack_, bool swap_bitmaps)
+ void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- // Swap bitmaps (if we are a full Gc then we swap the zygote bitmap too).
- virtual void SwapBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- void SwapLargeObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
mirror::Object* GetClearedReferences() {
return cleared_reference_list_;
}
@@ -177,12 +183,12 @@
return freed_objects_;
}
- uint64_t GetTotalTime() const {
- return total_time_;
+ uint64_t GetTotalTimeNs() const {
+ return total_time_ns_;
}
- uint64_t GetTotalPausedTime() const {
- return total_paused_time_;
+ uint64_t GetTotalPausedTimeNs() const {
+ return total_paused_time_ns_;
}
uint64_t GetTotalFreedObjects() const {
@@ -200,7 +206,7 @@
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Only sweep the weaks which are inside of an allocation stack.
- void SweepSystemWeaksArray(ObjectStack* allocations)
+ void SweepSystemWeaksArray(accounting::ObjectStack* allocations)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
static bool VerifyIsLiveCallback(const mirror::Object* obj, void* arg)
@@ -237,16 +243,6 @@
return *gc_barrier_;
}
- TimingLogger& GetTimings() {
- return timings_;
- }
-
- CumulativeLogger& GetCumulativeTimings() {
- return cumulative_timings_;
- }
-
- void ResetCumulativeStatistics();
-
protected:
// Returns true if the object has its bit set in the mark bitmap.
bool IsMarked(const mirror::Object* object) const;
@@ -381,13 +377,14 @@
// Whether or not we count how many of each type of object were scanned.
static const bool kCountScannedTypes = false;
- // Current space, we check this space first to avoid searching for the appropriate space for an object.
- SpaceBitmap* current_mark_bitmap_;
+ // Current space, we check this space first to avoid searching for the appropriate space for an
+ // object.
+ accounting::SpaceBitmap* current_mark_bitmap_;
// Cache java.lang.Class for optimization.
mirror::Class* java_lang_Class_;
- ObjectStack* mark_stack_;
+ accounting::ObjectStack* mark_stack_;
mirror::Object* finger_;
@@ -401,10 +398,15 @@
mirror::Object* phantom_reference_list_;
mirror::Object* cleared_reference_list_;
+ // Number of bytes freed in this collection.
AtomicInteger freed_bytes_;
+ // Number of objects freed in this collection.
AtomicInteger freed_objects_;
+ // Number of classes scanned, if kCountScannedTypes.
AtomicInteger class_count_;
+ // Number of arrays scanned, if kCountScannedTypes.
AtomicInteger array_count_;
+ // Number of non-class/arrays scanned, if kCountScannedTypes.
AtomicInteger other_count_;
AtomicInteger large_object_test_;
AtomicInteger large_object_mark_;
@@ -414,28 +416,19 @@
AtomicInteger work_chunks_deleted_;
AtomicInteger reference_count_;
- // Cumulative statistics.
- uint64_t total_time_;
- uint64_t total_paused_time_;
- uint64_t total_freed_objects_;
- uint64_t total_freed_bytes_;
-
UniquePtr<Barrier> gc_barrier_;
Mutex large_object_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
Mutex mark_stack_expand_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
const bool is_concurrent_;
- TimingLogger timings_;
- CumulativeLogger cumulative_timings_;
-
bool clear_soft_references_;
friend class AddIfReachesAllocSpaceVisitor; // Used by mod-union table.
friend class CheckBitmapVisitor;
friend class CheckObjectVisitor;
friend class CheckReferenceVisitor;
- friend class Heap;
+ friend class art::gc::Heap;
friend class InternTableEntryIsUnmarked;
friend class MarkIfReachesAllocspaceVisitor;
friend class ModUnionCheckReferences;
@@ -453,6 +446,8 @@
DISALLOW_COPY_AND_ASSIGN(MarkSweep);
};
+} // namespace collector
+} // namespace gc
} // namespace art
#endif // ART_SRC_GC_MARK_SWEEP_H_
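
The current_mark_bitmap_ member documented in this header is a locality optimization: marking first tests the bitmap of the space it last marked into and only does a heap-wide lookup on a miss. A condensed paraphrase of that fast path in mark_sweep.cc (not a complete implementation; the mark-stack push is elided):

accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
  // Miss: find the bitmap of whichever continuous space actually contains obj.
  accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
  if (LIKELY(new_bitmap != NULL)) {
    object_bitmap = new_bitmap;
  } else {
    MarkLargeObject(obj);  // Not in any continuous space, so treat it as a large object.
    return;
  }
}
if (!object_bitmap->Test(obj)) {
  object_bitmap->Set(obj);
  // ... push obj onto mark_stack_ so its references are scanned later ...
}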
diff --git a/src/gc/partial_mark_sweep.cc b/src/gc/collector/partial_mark_sweep.cc
similarity index 63%
rename from src/gc/partial_mark_sweep.cc
rename to src/gc/collector/partial_mark_sweep.cc
index f9c1787..ef893c5 100644
--- a/src/gc/partial_mark_sweep.cc
+++ b/src/gc/collector/partial_mark_sweep.cc
@@ -16,36 +16,38 @@
#include "partial_mark_sweep.h"
-#include "heap.h"
-#include "large_object_space.h"
+#include "gc/heap.h"
+#include "gc/space/space.h"
#include "partial_mark_sweep.h"
-#include "space.h"
#include "thread.h"
namespace art {
+namespace gc {
+namespace collector {
-PartialMarkSweep::PartialMarkSweep(Heap* heap, bool is_concurrent)
- : MarkSweep(heap, is_concurrent) {
+PartialMarkSweep::PartialMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
+ : MarkSweep(heap, is_concurrent, name_prefix + (name_prefix.empty() ? "" : " ") + "partial") {
cumulative_timings_.SetName(GetName());
}
-PartialMarkSweep::~PartialMarkSweep() {
-
-}
-
void PartialMarkSweep::BindBitmaps() {
MarkSweep::BindBitmaps();
- Spaces& spaces = GetHeap()->GetSpaces();
+ const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
// For partial GCs we need to bind the bitmap of the zygote space so that all objects in the
// zygote space are viewed as marked.
- for (Spaces::iterator it = spaces.begin(); it != spaces.end(); ++it) {
- ContinuousSpace* space = *it;
- if (space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect) {
+ // TODO: C++0x
+ typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+ for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
+ space::ContinuousSpace* space = *it;
+ if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
+ CHECK(space->IsZygoteSpace());
ImmuneSpace(space);
}
}
}
+} // namespace collector
+} // namespace gc
} // namespace art
diff --git a/src/gc/collector/partial_mark_sweep.h b/src/gc/collector/partial_mark_sweep.h
new file mode 100644
index 0000000..bd4a580
--- /dev/null
+++ b/src/gc/collector/partial_mark_sweep.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_COLLECTOR_PARTIAL_MARK_SWEEP_H_
+#define ART_SRC_GC_COLLECTOR_PARTIAL_MARK_SWEEP_H_
+
+#include "locks.h"
+#include "mark_sweep.h"
+
+namespace art {
+namespace gc {
+namespace collector {
+
+class PartialMarkSweep : public MarkSweep {
+ public:
+ virtual GcType GetGcType() const {
+ return kGcTypePartial;
+ }
+
+ explicit PartialMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = "");
+ ~PartialMarkSweep() {}
+
+ protected:
+ // Bind the live bits to the mark bits of bitmaps for spaces that aren't collected for partial
+  // collections, ie the Zygote space. Also mark this space as immune.
+ virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ DISALLOW_COPY_AND_ASSIGN(PartialMarkSweep);
+};
+
+} // namespace collector
+} // namespace gc
+} // namespace art
+
+#endif // ART_SRC_GC_COLLECTOR_PARTIAL_MARK_SWEEP_H_
diff --git a/src/gc/collector/sticky_mark_sweep.cc b/src/gc/collector/sticky_mark_sweep.cc
new file mode 100644
index 0000000..71e580d
--- /dev/null
+++ b/src/gc/collector/sticky_mark_sweep.cc
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gc/heap.h"
+#include "gc/space/large_object_space.h"
+#include "gc/space/space.h"
+#include "sticky_mark_sweep.h"
+#include "thread.h"
+
+namespace art {
+namespace gc {
+namespace collector {
+
+StickyMarkSweep::StickyMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
+ : PartialMarkSweep(heap, is_concurrent,
+ name_prefix + (name_prefix.empty() ? "" : " ") + "sticky") {
+ cumulative_timings_.SetName(GetName());
+}
+
+void StickyMarkSweep::BindBitmaps() {
+ PartialMarkSweep::BindBitmaps();
+
+ const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
+ WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+  // For sticky GC, we want to bind the bitmaps of all spaces because the allocation stack lets
+  // us know what was allocated since the last GC. A side-effect of binding the allocation space
+  // mark and live bitmaps is that marking objects will place them in the live bitmap.
+ // TODO: C++0x
+ typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+ for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
+ space::ContinuousSpace* space = *it;
+ if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
+ BindLiveToMarkBitmap(space);
+ }
+ }
+
+ GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();
+}
+
+void StickyMarkSweep::MarkReachableObjects() {
+ DisableFinger();
+ RecursiveMarkDirtyObjects(accounting::CardTable::kCardDirty - 1);
+}
+
+void StickyMarkSweep::Sweep(bool swap_bitmaps) {
+ timings_.NewSplit("SweepArray");
+ accounting::ObjectStack* live_stack = GetHeap()->GetLiveStack();
+ SweepArray(live_stack, false);
+}
+
+} // namespace collector
+} // namespace gc
+} // namespace art
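
The name_prefix plumbing introduced above composes collector names by concatenation: StickyMarkSweep appends "sticky", PartialMarkSweep appends "partial", and MarkSweep terminates the chain with "mark sweep" or "concurrent mark sweep". For example (construction shown only to illustrate the resulting names; heap is assumed to be the runtime's Heap*):

// GetName() returns "sticky partial concurrent mark sweep".
gc::collector::StickyMarkSweep* sticky =
    new gc::collector::StickyMarkSweep(heap, /* is_concurrent */ true);
// GetName() returns "partial mark sweep".
gc::collector::PartialMarkSweep* partial =
    new gc::collector::PartialMarkSweep(heap, /* is_concurrent */ false);
LOG(INFO) << sticky->GetName() << " / " << partial->GetName();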
diff --git a/src/gc/sticky_mark_sweep.h b/src/gc/collector/sticky_mark_sweep.h
similarity index 67%
rename from src/gc/sticky_mark_sweep.h
rename to src/gc/collector/sticky_mark_sweep.h
index 41ab0cc..b16cfc1 100644
--- a/src/gc/sticky_mark_sweep.h
+++ b/src/gc/collector/sticky_mark_sweep.h
@@ -22,29 +22,34 @@
#include "partial_mark_sweep.h"
namespace art {
+namespace gc {
+namespace collector {
class StickyMarkSweep : public PartialMarkSweep {
public:
- virtual GcType GetGcType() const {
+ GcType GetGcType() const {
return kGcTypeSticky;
}
- explicit StickyMarkSweep(Heap* heap, bool is_concurrent);
- ~StickyMarkSweep();
-protected:
- virtual void BindBitmaps()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ explicit StickyMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = "");
+ ~StickyMarkSweep() {}
- virtual void MarkReachableObjects()
+ protected:
+  // Bind the live bits to the mark bits of bitmaps for all spaces; all spaces other than the
+  // alloc space will be marked as immune.
+ void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void MarkReachableObjects()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- virtual void Sweep(TimingLogger& timings, bool swap_bitmaps)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
DISALLOW_COPY_AND_ASSIGN(StickyMarkSweep);
};
+} // namespace collector
+} // namespace gc
} // namespace art
#endif // ART_SRC_GC_STICKY_MARK_SWEEP_H_
diff --git a/src/gc/garbage_collector.cc b/src/gc/garbage_collector.cc
deleted file mode 100644
index 94daec7..0000000
--- a/src/gc/garbage_collector.cc
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "garbage_collector.h"
-
-#include "base/mutex-inl.h"
-#include "thread.h"
-#include "thread_list.h"
-
-namespace art {
- GarbageCollector::GarbageCollector(Heap* heap)
- : heap_(heap),
- duration_(0) {
-
- }
-
- bool GarbageCollector::HandleDirtyObjectsPhase() {
- DCHECK(IsConcurrent());
- return true;
- }
-
- void GarbageCollector::RegisterPause(uint64_t nano_length) {
- pause_times_.push_back(nano_length);
- }
-
- void GarbageCollector::Run() {
- Thread* self = Thread::Current();
- ThreadList* thread_list = Runtime::Current()->GetThreadList();
-
- uint64_t start_time = NanoTime();
- pause_times_.clear();
- duration_ = 0;
-
- InitializePhase();
-
- if (!IsConcurrent()) {
- // Pause is the entire length of the GC.
- uint64_t pause_start = NanoTime();
- thread_list->SuspendAll();
- MarkingPhase();
- ReclaimPhase();
- thread_list->ResumeAll();
- uint64_t pause_end = NanoTime();
- pause_times_.push_back(pause_end - pause_start);
- } else {
- {
- ReaderMutexLock mu(self, *Locks::mutator_lock_);
- MarkingPhase();
- }
- bool done = false;
- while (!done) {
- uint64_t pause_start = NanoTime();
- thread_list->SuspendAll();
- done = HandleDirtyObjectsPhase();
- thread_list->ResumeAll();
- uint64_t pause_end = NanoTime();
- pause_times_.push_back(pause_end - pause_start);
- }
- {
- ReaderMutexLock mu(self, *Locks::mutator_lock_);
- ReclaimPhase();
- }
- }
-
- uint64_t end_time = NanoTime();
- duration_ = end_time - start_time;
-
- FinishPhase();
- }
-
- GarbageCollector::~GarbageCollector() {
-
- }
-} // namespace art
diff --git a/src/gc/gc_type.h b/src/gc/gc_type.h
deleted file mode 100644
index 908f038..0000000
--- a/src/gc/gc_type.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_SRC_GC_GC_TYPE_H_
-#define ART_SRC_GC_GC_TYPE_H_
-
-namespace art {
-
-// The ordering of the enum matters, it is used to determine which GCs are run first.
-enum GcType {
- // No Gc
- kGcTypeNone,
- // Sticky mark bits "generational" GC.
- kGcTypeSticky,
- // Partial GC, over only the alloc space.
- kGcTypePartial,
- // Full GC
- kGcTypeFull,
- // Number of different Gc types.
- kGcTypeMax,
-};
-std::ostream& operator<<(std::ostream& os, const GcType& policy);
-
-} // namespace art
-
-#endif // ART_SRC_GC_GC_TYPE_H_
diff --git a/src/gc/heap.cc b/src/gc/heap.cc
new file mode 100644
index 0000000..34c0b5c
--- /dev/null
+++ b/src/gc/heap.cc
@@ -0,0 +1,1936 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "heap.h"
+
+#include <sys/types.h>
+#include <sys/wait.h>
+
+#include <limits>
+#include <vector>
+
+#include "base/stl_util.h"
+#include "cutils/sched_policy.h"
+#include "debugger.h"
+#include "gc/accounting/atomic_stack.h"
+#include "gc/accounting/card_table-inl.h"
+#include "gc/accounting/heap_bitmap-inl.h"
+#include "gc/accounting/mod_union_table-inl.h"
+#include "gc/accounting/space_bitmap-inl.h"
+#include "gc/collector/mark_sweep-inl.h"
+#include "gc/collector/partial_mark_sweep.h"
+#include "gc/collector/sticky_mark_sweep.h"
+#include "gc/space/image_space.h"
+#include "gc/space/large_object_space.h"
+#include "gc/space/space-inl.h"
+#include "image.h"
+#include "invoke_arg_array_builder.h"
+#include "mirror/class-inl.h"
+#include "mirror/field-inl.h"
+#include "mirror/object.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
+#include "object_utils.h"
+#include "os.h"
+#include "ScopedLocalRef.h"
+#include "scoped_thread_state_change.h"
+#include "sirt_ref.h"
+#include "thread_list.h"
+#include "UniquePtr.h"
+#include "well_known_classes.h"
+
+namespace art {
+namespace gc {
+
+// When to create a log message about a slow GC, 100ms.
+static const uint64_t kSlowGcThreshold = MsToNs(100);
+// When to create a log message about a slow pause, 5ms.
+static const uint64_t kLongGcPauseThreshold = MsToNs(5);
+static const bool kDumpGcPerformanceOnShutdown = false;
+// Minimum amount of remaining bytes before a concurrent GC is triggered.
+static const size_t kMinConcurrentRemainingBytes = 128 * KB;
+const double Heap::kDefaultTargetUtilization = 0.5;
+
+static bool GenerateImage(const std::string& image_file_name) {
+ const std::string boot_class_path_string(Runtime::Current()->GetBootClassPathString());
+ std::vector<std::string> boot_class_path;
+ Split(boot_class_path_string, ':', boot_class_path);
+ if (boot_class_path.empty()) {
+ LOG(FATAL) << "Failed to generate image because no boot class path specified";
+ }
+
+ std::vector<char*> arg_vector;
+
+ std::string dex2oat_string(GetAndroidRoot());
+ dex2oat_string += (kIsDebugBuild ? "/bin/dex2oatd" : "/bin/dex2oat");
+ const char* dex2oat = dex2oat_string.c_str();
+ arg_vector.push_back(strdup(dex2oat));
+
+ std::string image_option_string("--image=");
+ image_option_string += image_file_name;
+ const char* image_option = image_option_string.c_str();
+ arg_vector.push_back(strdup(image_option));
+
+ arg_vector.push_back(strdup("--runtime-arg"));
+ arg_vector.push_back(strdup("-Xms64m"));
+
+ arg_vector.push_back(strdup("--runtime-arg"));
+ arg_vector.push_back(strdup("-Xmx64m"));
+
+ for (size_t i = 0; i < boot_class_path.size(); i++) {
+ std::string dex_file_option_string("--dex-file=");
+ dex_file_option_string += boot_class_path[i];
+ const char* dex_file_option = dex_file_option_string.c_str();
+ arg_vector.push_back(strdup(dex_file_option));
+ }
+
+ std::string oat_file_option_string("--oat-file=");
+ oat_file_option_string += image_file_name;
+ oat_file_option_string.erase(oat_file_option_string.size() - 3);
+ oat_file_option_string += "oat";
+ const char* oat_file_option = oat_file_option_string.c_str();
+ arg_vector.push_back(strdup(oat_file_option));
+
+ std::string base_option_string(StringPrintf("--base=0x%x", ART_BASE_ADDRESS));
+ arg_vector.push_back(strdup(base_option_string.c_str()));
+
+ if (!kIsTargetBuild) {
+ arg_vector.push_back(strdup("--host"));
+ }
+
+ std::string command_line(Join(arg_vector, ' '));
+ LOG(INFO) << command_line;
+
+ arg_vector.push_back(NULL);
+ char** argv = &arg_vector[0];
+
+ // fork and exec dex2oat
+ pid_t pid = fork();
+ if (pid == 0) {
+ // no allocation allowed between fork and exec
+
+ // change process groups, so we don't get reaped by ProcessManager
+ setpgid(0, 0);
+
+ execv(dex2oat, argv);
+
+ PLOG(FATAL) << "execv(" << dex2oat << ") failed";
+ return false;
+ } else {
+ STLDeleteElements(&arg_vector);
+
+ // wait for dex2oat to finish
+ int status;
+ pid_t got_pid = TEMP_FAILURE_RETRY(waitpid(pid, &status, 0));
+ if (got_pid != pid) {
+ PLOG(ERROR) << "waitpid failed: wanted " << pid << ", got " << got_pid;
+ return false;
+ }
+ if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
+ LOG(ERROR) << dex2oat << " failed: " << command_line;
+ return false;
+ }
+ }
+ return true;
+}
+
+void Heap::UnReserveOatFileAddressRange() {
+ oat_file_map_.reset(NULL);
+}
+
+Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
+ double target_utilization, size_t capacity,
+ const std::string& original_image_file_name, bool concurrent_gc)
+ : alloc_space_(NULL),
+ card_table_(NULL),
+ concurrent_gc_(concurrent_gc),
+ have_zygote_space_(false),
+ reference_queue_lock_(NULL),
+ is_gc_running_(false),
+ last_gc_type_(collector::kGcTypeNone),
+ capacity_(capacity),
+ growth_limit_(growth_limit),
+ max_allowed_footprint_(initial_size),
+ concurrent_start_bytes_(concurrent_gc ? initial_size - (kMinConcurrentRemainingBytes)
+ : std::numeric_limits<size_t>::max()),
+ sticky_gc_count_(0),
+ sticky_to_partial_gc_ratio_(10),
+ total_bytes_freed_ever_(0),
+ total_objects_freed_ever_(0),
+ large_object_threshold_(3 * kPageSize),
+ num_bytes_allocated_(0),
+ verify_missing_card_marks_(false),
+ verify_system_weaks_(false),
+ verify_pre_gc_heap_(false),
+ verify_post_gc_heap_(false),
+ verify_mod_union_table_(false),
+ min_alloc_space_size_for_sticky_gc_(2 * MB),
+ min_remaining_space_for_sticky_gc_(1 * MB),
+ last_trim_time_ms_(0),
+ allocation_rate_(0),
+ max_allocation_stack_size_(kDesiredHeapVerification > kNoHeapVerification ? KB : MB),
+ reference_referent_offset_(0),
+ reference_queue_offset_(0),
+ reference_queueNext_offset_(0),
+ reference_pendingNext_offset_(0),
+ finalizer_reference_zombie_offset_(0),
+ min_free_(min_free),
+ max_free_(max_free),
+ target_utilization_(target_utilization),
+ total_wait_time_(0),
+ measure_allocation_time_(false),
+ total_allocation_time_(0),
+ verify_object_mode_(kHeapVerificationNotPermitted) {
+ if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
+ LOG(INFO) << "Heap() entering";
+ }
+
+ live_bitmap_.reset(new accounting::HeapBitmap(this));
+ mark_bitmap_.reset(new accounting::HeapBitmap(this));
+
+ // Requested begin for the alloc space, to follow the mapped image and oat files
+ byte* requested_begin = NULL;
+ std::string image_file_name(original_image_file_name);
+ if (!image_file_name.empty()) {
+ space::ImageSpace* image_space = NULL;
+
+ if (OS::FileExists(image_file_name.c_str())) {
+ // If the /system file exists, it should be up-to-date; don't try to generate the image.
+ image_space = space::ImageSpace::Create(image_file_name);
+ } else {
+ // If the /system file didn't exist, we need to use one from the dalvik-cache.
+ // If the cache file exists, try to open, but if it fails, regenerate.
+ // If it does not exist, generate.
+ image_file_name = GetDalvikCacheFilenameOrDie(image_file_name);
+ if (OS::FileExists(image_file_name.c_str())) {
+ image_space = space::ImageSpace::Create(image_file_name);
+ }
+ if (image_space == NULL) {
+ CHECK(GenerateImage(image_file_name)) << "Failed to generate image: " << image_file_name;
+ image_space = space::ImageSpace::Create(image_file_name);
+ }
+ }
+
+ CHECK(image_space != NULL) << "Failed to create space from " << image_file_name;
+ AddContinuousSpace(image_space);
+ // Oat files referenced by image files immediately follow them in memory; ensure the alloc space
+ // isn't going to end up in the middle.
+ byte* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
+ CHECK_GT(oat_file_end_addr, image_space->End());
+
+ // Reserve address range from image_space->End() to image_space->GetImageHeader().GetOatEnd()
+ uintptr_t reserve_begin = RoundUp(reinterpret_cast<uintptr_t>(image_space->End()), kPageSize);
+ uintptr_t reserve_end = RoundUp(reinterpret_cast<uintptr_t>(oat_file_end_addr), kPageSize);
+ oat_file_map_.reset(MemMap::MapAnonymous("oat file reserve",
+ reinterpret_cast<byte*>(reserve_begin),
+ reserve_end - reserve_begin, PROT_NONE));
+
+ if (oat_file_end_addr > requested_begin) {
+ requested_begin = reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(oat_file_end_addr),
+ kPageSize));
+ }
+ }
+
+ // Allocate the large object space.
+ const bool kUseFreeListSpaceForLOS = false;
+ if (kUseFreeListSpaceForLOS) {
+ large_object_space_ = space::FreeListSpace::Create("large object space", NULL, capacity);
+ } else {
+ large_object_space_ = space::LargeObjectMapSpace::Create("large object space");
+ }
+ CHECK(large_object_space_ != NULL) << "Failed to create large object space";
+ AddDiscontinuousSpace(large_object_space_);
+
+ alloc_space_ = space::DlMallocSpace::Create("alloc space",
+ initial_size,
+ growth_limit, capacity,
+ requested_begin);
+ CHECK(alloc_space_ != NULL) << "Failed to create alloc space";
+ alloc_space_->SetFootprintLimit(alloc_space_->Capacity());
+ AddContinuousSpace(alloc_space_);
+
+ // Compute heap capacity. Continuous spaces are sorted in order of Begin().
+ byte* heap_begin = continuous_spaces_.front()->Begin();
+ size_t heap_capacity = continuous_spaces_.back()->End() - continuous_spaces_.front()->Begin();
+ if (continuous_spaces_.back()->IsDlMallocSpace()) {
+ heap_capacity += continuous_spaces_.back()->AsDlMallocSpace()->NonGrowthLimitCapacity();
+ }
+
+ // Mark image objects in the live bitmap
+ // TODO: C++0x
+ typedef std::vector<space::ContinuousSpace*>::iterator It;
+ for (It it = continuous_spaces_.begin(); it != continuous_spaces_.end(); ++it) {
+ space::ContinuousSpace* space = *it;
+ if (space->IsImageSpace()) {
+ space::ImageSpace* image_space = space->AsImageSpace();
+ image_space->RecordImageAllocations(image_space->GetLiveBitmap());
+ }
+ }
+
+ // Allocate the card table.
+ card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
+ CHECK(card_table_.get() != NULL) << "Failed to create card table";
+
+ image_mod_union_table_.reset(new accounting::ModUnionTableToZygoteAllocspace(this));
+ CHECK(image_mod_union_table_.get() != NULL) << "Failed to create image mod-union table";
+
+ zygote_mod_union_table_.reset(new accounting::ModUnionTableCardCache(this));
+ CHECK(zygote_mod_union_table_.get() != NULL) << "Failed to create Zygote mod-union table";
+
+ // TODO: Count objects in the image space here.
+ num_bytes_allocated_ = 0;
+
+ // Default mark stack size in bytes.
+ static const size_t default_mark_stack_size = 64 * KB;
+ mark_stack_.reset(accounting::ObjectStack::Create("mark stack", default_mark_stack_size));
+ allocation_stack_.reset(accounting::ObjectStack::Create("allocation stack",
+ max_allocation_stack_size_));
+ live_stack_.reset(accounting::ObjectStack::Create("live stack",
+ max_allocation_stack_size_));
+
+ // It's still too early to take a lock because there are no threads yet, but we can create locks
+ // now. We don't create them earlier to make it clear that you can't use locks during heap
+ // initialization.
+ gc_complete_lock_ = new Mutex("GC complete lock");
+ gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
+ *gc_complete_lock_));
+
+ // Create the reference queue lock; this is required for parallel object scanning in the GC.
+ reference_queue_lock_ = new Mutex("reference queue lock");
+
+ last_gc_time_ns_ = NanoTime();
+ last_gc_size_ = GetBytesAllocated();
+
+ // Create our garbage collectors.
+ for (size_t i = 0; i < 2; ++i) {
+ const bool concurrent = i != 0;
+ mark_sweep_collectors_.push_back(new collector::MarkSweep(this, concurrent));
+ mark_sweep_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
+ mark_sweep_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
+ }
+
+ CHECK(max_allowed_footprint_ != 0);
+ if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
+ LOG(INFO) << "Heap() exiting";
+ }
+}
+
+void Heap::CreateThreadPool() {
+ // TODO: Make sysconf(_SC_NPROCESSORS_CONF) be a helper function?
+ // Use the number of processors - 1 since the thread doing the GC does work while it's waiting for
+ // workers to complete.
+ thread_pool_.reset(new ThreadPool(1)); // new ThreadPool(sysconf(_SC_NPROCESSORS_CONF) - 1));
+}
+
+void Heap::DeleteThreadPool() {
+ thread_pool_.reset(NULL);
+}
+
+// Sort spaces based on begin address
+struct ContinuousSpaceSorter {
+ bool operator ()(const space::ContinuousSpace* a, const space::ContinuousSpace* b) const {
+ return a->Begin() < b->Begin();
+ }
+};
+
+void Heap::AddContinuousSpace(space::ContinuousSpace* space) {
+ WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ DCHECK(space != NULL);
+ DCHECK(space->GetLiveBitmap() != NULL);
+ live_bitmap_->AddContinuousSpaceBitmap(space->GetLiveBitmap());
+ DCHECK(space->GetMarkBitmap() != NULL);
+ mark_bitmap_->AddContinuousSpaceBitmap(space->GetMarkBitmap());
+ continuous_spaces_.push_back(space);
+ if (space->IsDlMallocSpace() && !space->IsLargeObjectSpace()) {
+ alloc_space_ = space->AsDlMallocSpace();
+ }
+
+ // Ensure that spaces remain sorted in increasing order of start address (required for CMS finger)
+ std::sort(continuous_spaces_.begin(), continuous_spaces_.end(), ContinuousSpaceSorter());
+
+ // Ensure that ImageSpaces < ZygoteSpaces < AllocSpaces so that we can do address based checks to
+ // avoid redundant marking.
+ bool seen_zygote = false, seen_alloc = false;
+ typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+ for (It it = continuous_spaces_.begin(); it != continuous_spaces_.end(); ++it) {
+ space::ContinuousSpace* space = *it;
+ if (space->IsImageSpace()) {
+ DCHECK(!seen_zygote);
+ DCHECK(!seen_alloc);
+ } else if (space->IsZygoteSpace()) {
+ DCHECK(!seen_alloc);
+ seen_zygote = true;
+ } else if (space->IsDlMallocSpace()) {
+ seen_alloc = true;
+ }
+ }
+}
+
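The invariant checked above (spaces sorted by begin address, with image spaces before zygote spaces before alloc spaces) is what lets a collector treat the non-collected spaces as one contiguous range and skip their objects with a single address comparison. A small sketch of that kind of check follows; the ImmuneRange name and the range bounds are hypothetical, chosen only for illustration.

  #include <cassert>
  #include <stdint.h>

  // Illustrative only: with image/zygote spaces laid out below the alloc space, a collector can
  // record their combined extent once and ignore any object whose address falls inside it,
  // instead of looking up the owning space on every mark.
  struct ImmuneRange {
    uintptr_t begin;
    uintptr_t end;

    bool Contains(const void* obj) const {
      uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
      return begin <= addr && addr < end;
    }
  };

  int main() {
    // Hypothetical layout: image + zygote spaces cover [0x60000000, 0x64000000),
    // the alloc space begins above that.
    ImmuneRange immune = { 0x60000000u, 0x64000000u };
    assert(immune.Contains(reinterpret_cast<void*>(0x61000000u)));   // immune object: skip it
    assert(!immune.Contains(reinterpret_cast<void*>(0x65000000u)));  // alloc space object: mark it
    return 0;
  }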
+void Heap::AddDiscontinuousSpace(space::DiscontinuousSpace* space) {
+ WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ DCHECK(space != NULL);
+ DCHECK(space->GetLiveObjects() != NULL);
+ live_bitmap_->AddDiscontinuousObjectSet(space->GetLiveObjects());
+ DCHECK(space->GetMarkObjects() != NULL);
+ mark_bitmap_->AddDiscontinuousObjectSet(space->GetMarkObjects());
+ discontinuous_spaces_.push_back(space);
+}
+
+void Heap::DumpGcPerformanceInfo(std::ostream& os) {
+ // Dump cumulative timings.
+ os << "Dumping cumulative Gc timings\n";
+ uint64_t total_duration = 0;
+
+ // Dump cumulative loggers for each GC type.
+ // TODO: C++0x
+ uint64_t total_paused_time = 0;
+ typedef std::vector<collector::MarkSweep*>::const_iterator It;
+ for (It it = mark_sweep_collectors_.begin();
+ it != mark_sweep_collectors_.end(); ++it) {
+ collector::MarkSweep* collector = *it;
+ CumulativeLogger& logger = collector->GetCumulativeTimings();
+ if (logger.GetTotalNs() != 0) {
+ os << Dumpable<CumulativeLogger>(logger);
+ const uint64_t total_ns = logger.GetTotalNs();
+ const uint64_t total_pause_ns = (*it)->GetTotalPausedTimeNs();
+ double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
+ const uint64_t freed_bytes = collector->GetTotalFreedBytes();
+ const uint64_t freed_objects = collector->GetTotalFreedObjects();
+ os << collector->GetName() << " total time: " << PrettyDuration(total_ns) << "\n"
+ << collector->GetName() << " paused time: " << PrettyDuration(total_pause_ns) << "\n"
+ << collector->GetName() << " freed: " << freed_objects
+ << " objects with total size " << PrettySize(freed_bytes) << "\n"
+ << collector->GetName() << " throughput: " << freed_objects / seconds << "/s / "
+ << PrettySize(freed_bytes / seconds) << "/s\n";
+ total_duration += total_ns;
+ total_paused_time += total_pause_ns;
+ }
+ }
+ uint64_t allocation_time = static_cast<uint64_t>(total_allocation_time_) * kTimeAdjust;
+ size_t total_objects_allocated = GetObjectsAllocatedEver();
+ size_t total_bytes_allocated = GetBytesAllocatedEver();
+ if (total_duration != 0) {
+ const double total_seconds = double(total_duration / 1000) / 1000000.0;
+ os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
+ os << "Mean GC size throughput: "
+ << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
+ os << "Mean GC object throughput: "
+ << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
+ }
+ os << "Total number of allocations: " << total_objects_allocated << "\n";
+ os << "Total bytes allocated " << PrettySize(total_bytes_allocated) << "\n";
+ if (measure_allocation_time_) {
+ os << "Total time spent allocating: " << PrettyDuration(allocation_time) << "\n";
+ os << "Mean allocation time: " << PrettyDuration(allocation_time / total_objects_allocated)
+ << "\n";
+ }
+ os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
+ os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
+}
+
+Heap::~Heap() {
+ if (kDumpGcPerformanceOnShutdown) {
+ DumpGcPerformanceInfo(LOG(INFO));
+ }
+
+ STLDeleteElements(&mark_sweep_collectors_);
+
+ // If we don't reset these then the allocation and live stacks complain in their destructors.
+ allocation_stack_->Reset();
+ live_stack_->Reset();
+
+ VLOG(heap) << "~Heap()";
+ // We can't take the heap lock here because there might be a daemon thread suspended with the
+ // heap lock held. We know though that no non-daemon threads are executing, and we know that
+ // all daemon threads are suspended, and we also know that the thread list has been deleted, so
+ // those threads can't resume. We're the only running thread, and we can do whatever we like...
+ STLDeleteElements(&continuous_spaces_);
+ STLDeleteElements(&discontinuous_spaces_);
+ delete gc_complete_lock_;
+ delete reference_queue_lock_;
+}
+
+space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
+ bool fail_ok) const {
+ // TODO: C++0x auto
+ typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+ for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
+ if ((*it)->Contains(obj)) {
+ return *it;
+ }
+ }
+ if (!fail_ok) {
+ LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
+ }
+ return NULL;
+}
+
+space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
+ bool fail_ok) const {
+ // TODO: C++0x auto
+ typedef std::vector<space::DiscontinuousSpace*>::const_iterator It;
+ for (It it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
+ if ((*it)->Contains(obj)) {
+ return *it;
+ }
+ }
+ if (!fail_ok) {
+ LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
+ }
+ return NULL;
+}
+
+space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
+ space::Space* result = FindContinuousSpaceFromObject(obj, true);
+ if (result != NULL) {
+ return result;
+ }
+ return FindDiscontinuousSpaceFromObject(obj, true);
+}
+
+space::ImageSpace* Heap::GetImageSpace() const {
+ // TODO: C++0x auto
+ typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+ for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
+ if ((*it)->IsImageSpace()) {
+ return (*it)->AsImageSpace();
+ }
+ }
+ return NULL;
+}
+
+static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* arg) {
+ size_t chunk_size = reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start);
+ if (used_bytes < chunk_size) {
+ size_t chunk_free_bytes = chunk_size - used_bytes;
+ size_t& max_contiguous_allocation = *reinterpret_cast<size_t*>(arg);
+ max_contiguous_allocation = std::max(max_contiguous_allocation, chunk_free_bytes);
+ }
+}
+
+mirror::Object* Heap::AllocObject(Thread* self, mirror::Class* c, size_t byte_count) {
+ DCHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
+ (c->IsVariableSize() || c->GetObjectSize() == byte_count) ||
+ strlen(ClassHelper(c).GetDescriptor()) == 0);
+ DCHECK_GE(byte_count, sizeof(mirror::Object));
+
+ mirror::Object* obj = NULL;
+ size_t size = 0;
+ uint64_t allocation_start = 0;
+ if (measure_allocation_time_) {
+ allocation_start = NanoTime();
+ }
+
+ // We need to have a zygote space or else our newly allocated large object can end up in the
+ // Zygote resulting in it being prematurely freed.
+ // We can only do this for primitive objects since large objects will not be within the card table
+ // range. This also means that we rely on SetClass not dirtying the object's card.
+ if (byte_count >= large_object_threshold_ && have_zygote_space_ && c->IsPrimitiveArray()) {
+ size = RoundUp(byte_count, kPageSize);
+ obj = Allocate(self, large_object_space_, size);
+ // Make sure that our large object didn't get placed anywhere within the space interval or else
+ // it breaks the immune range.
+ DCHECK(obj == NULL ||
+ reinterpret_cast<byte*>(obj) < continuous_spaces_.front()->Begin() ||
+ reinterpret_cast<byte*>(obj) >= continuous_spaces_.back()->End());
+ } else {
+ obj = Allocate(self, alloc_space_, byte_count);
+
+ // Ensure that we did not allocate into a zygote space.
+ DCHECK(obj == NULL || !have_zygote_space_ || !FindSpaceFromObject(obj, false)->IsZygoteSpace());
+ size = alloc_space_->AllocationSize(obj);
+ }
+
+ if (LIKELY(obj != NULL)) {
+ obj->SetClass(c);
+
+ // Record the allocation after SetClass since we want the atomic add to act as the fence guarding
+ // SetClass; we do not want the class to appear NULL in another thread.
+ RecordAllocation(size, obj);
+
+ if (Dbg::IsAllocTrackingEnabled()) {
+ Dbg::RecordAllocation(c, byte_count);
+ }
+ if (static_cast<size_t>(num_bytes_allocated_) >= concurrent_start_bytes_) {
+ // We already have a request pending, no reason to start more until we update
+ // concurrent_start_bytes_.
+ concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
+ // The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint.
+ SirtRef<mirror::Object> ref(self, obj);
+ RequestConcurrentGC(self);
+ }
+ VerifyObject(obj);
+
+ if (measure_allocation_time_) {
+ total_allocation_time_ += (NanoTime() - allocation_start) / kTimeAdjust;
+ }
+
+ return obj;
+ }
+ std::ostringstream oss;
+ int64_t total_bytes_free = GetFreeMemory();
+ uint64_t alloc_space_size = alloc_space_->GetBytesAllocated();
+ uint64_t large_object_size = large_object_space_->GetBytesAllocated();
+ oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
+ << " free bytes; allocation space size " << alloc_space_size
+ << "; large object space size " << large_object_size;
+ // If the allocation failed due to fragmentation, print out the largest continuous allocation.
+ if (total_bytes_free >= byte_count) {
+ size_t max_contiguous_allocation = 0;
+ // TODO: C++0x auto
+ typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+ for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
+ space::ContinuousSpace* space = *it;
+ if (space->IsDlMallocSpace()) {
+ space->AsDlMallocSpace()->Walk(MSpaceChunkCallback, &max_contiguous_allocation);
+ }
+ }
+ oss << "; failed due to fragmentation (largest possible contiguous allocation "
+ << max_contiguous_allocation << " bytes)";
+ }
+ self->ThrowOutOfMemoryError(oss.str().c_str());
+ return NULL;
+}
+
+bool Heap::IsHeapAddress(const mirror::Object* obj) {
+ // Note: we deliberately don't take the lock here, and mustn't test anything that would
+ // require taking the lock.
+ if (obj == NULL) {
+ return true;
+ }
+ if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
+ return false;
+ }
+ return FindSpaceFromObject(obj, true) != NULL;
+}
+
+bool Heap::IsLiveObjectLocked(const mirror::Object* obj) {
+ //Locks::heap_bitmap_lock_->AssertReaderHeld(Thread::Current());
+ if (obj == NULL) {
+ return false;
+ }
+ if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
+ return false;
+ }
+ space::ContinuousSpace* cont_space = FindContinuousSpaceFromObject(obj, true);
+ if (cont_space != NULL) {
+ if (cont_space->GetLiveBitmap()->Test(obj)) {
+ return true;
+ }
+ } else {
+ space::DiscontinuousSpace* disc_space = FindDiscontinuousSpaceFromObject(obj, true);
+ if (disc_space != NULL) {
+ if (disc_space->GetLiveObjects()->Test(obj)) {
+ return true;
+ }
+ }
+ }
+ for (size_t i = 0; i < 5; ++i) {
+ if (allocation_stack_->Contains(const_cast<mirror::Object*>(obj)) ||
+ live_stack_->Contains(const_cast<mirror::Object*>(obj))) {
+ return true;
+ }
+ NanoSleep(MsToNs(10));
+ }
+ return false;
+}
+
+void Heap::VerifyObjectImpl(const mirror::Object* obj) {
+ if (Thread::Current() == NULL ||
+ Runtime::Current()->GetThreadList()->GetLockOwner() == Thread::Current()->GetTid()) {
+ return;
+ }
+ VerifyObjectBody(obj);
+}
+
+void Heap::DumpSpaces() {
+ // TODO: C++0x auto
+ typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+ for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
+ space::ContinuousSpace* space = *it;
+ accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
+ LOG(INFO) << space << " " << *space << "\n"
+ << live_bitmap << " " << *live_bitmap << "\n"
+ << mark_bitmap << " " << *mark_bitmap;
+ }
+ typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
+ for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
+ space::DiscontinuousSpace* space = *it;
+ LOG(INFO) << space << " " << *space << "\n";
+ }
+}
+
+void Heap::VerifyObjectBody(const mirror::Object* obj) {
+ if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
+ LOG(FATAL) << "Object isn't aligned: " << obj;
+ }
+ if (UNLIKELY(GetObjectsAllocated() <= 10)) { // Ignore early dawn of the universe verifications.
+ return;
+ }
+ const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
+ mirror::Object::ClassOffset().Int32Value();
+ const mirror::Class* c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
+ if (UNLIKELY(c == NULL)) {
+ LOG(FATAL) << "Null class in object: " << obj;
+ } else if (UNLIKELY(!IsAligned<kObjectAlignment>(c))) {
+ LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
+ }
+ // Check obj.getClass().getClass() == obj.getClass().getClass().getClass()
+ // Note: we don't use the accessors here as they have internal sanity checks
+ // that we don't want to run
+ raw_addr = reinterpret_cast<const byte*>(c) + mirror::Object::ClassOffset().Int32Value();
+ const mirror::Class* c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
+ raw_addr = reinterpret_cast<const byte*>(c_c) + mirror::Object::ClassOffset().Int32Value();
+ const mirror::Class* c_c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
+ CHECK_EQ(c_c, c_c_c);
+
+ if (verify_object_mode_ != kVerifyAllFast) {
+ // TODO: the bitmap tests below are racy if VerifyObjectBody is called without the
+ // heap_bitmap_lock_.
+ if (!IsLiveObjectLocked(obj)) {
+ DumpSpaces();
+ LOG(FATAL) << "Object is dead: " << obj;
+ }
+ if (!IsLiveObjectLocked(c)) {
+ LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
+ }
+ }
+}
+
+void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
+ DCHECK(obj != NULL);
+ reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
+}
+
+void Heap::VerifyHeap() {
+ ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
+}
+
+void Heap::RecordAllocation(size_t size, mirror::Object* obj) {
+ DCHECK(obj != NULL);
+ DCHECK_GT(size, 0u);
+ num_bytes_allocated_ += size;
+
+ if (Runtime::Current()->HasStatsEnabled()) {
+ RuntimeStats* thread_stats = Thread::Current()->GetStats();
+ ++thread_stats->allocated_objects;
+ thread_stats->allocated_bytes += size;
+
+ // TODO: Update these atomically.
+ RuntimeStats* global_stats = Runtime::Current()->GetStats();
+ ++global_stats->allocated_objects;
+ global_stats->allocated_bytes += size;
+ }
+
+ // This is safe to do since the GC will never free objects which are in neither the allocation
+ // stack nor the live bitmap.
+ while (!allocation_stack_->AtomicPushBack(obj)) {
+ CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
+ }
+}
+
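The retry loop in RecordAllocation above relies on the push failing when the allocation stack is full; the sticky collection it triggers drains the stack (marking its objects into the live bitmap), after which the push succeeds. A toy version of that pattern is sketched below; BoundedStack and CollectSticky are stand-ins invented for the illustration, not the ART types.

  #include <cstddef>
  #include <cstdio>
  #include <vector>

  // A fixed-capacity stack whose push fails instead of growing, mirroring the overflow behaviour
  // of the real allocation stack in spirit (no atomicity here).
  class BoundedStack {
   public:
    explicit BoundedStack(size_t capacity) : capacity_(capacity) {}
    bool PushBack(void* obj) {
      if (entries_.size() >= capacity_) {
        return false;  // caller must make room and retry
      }
      entries_.push_back(obj);
      return true;
    }
    void Reset() { entries_.clear(); }
   private:
    size_t capacity_;
    std::vector<void*> entries_;
  };

  // Stand-in for CollectGarbageInternal(kGcTypeSticky, kGcCauseForAlloc, false): a real sticky
  // GC marks the stacked objects before the stack is reset, so nothing is lost.
  static void CollectSticky(BoundedStack* stack) {
    stack->Reset();
  }

  static void RecordAllocationSketch(BoundedStack* stack, void* obj) {
    while (!stack->PushBack(obj)) {  // only loops when the stack overflowed
      CollectSticky(stack);
    }
  }

  int main() {
    BoundedStack stack(2);
    int a = 0, b = 0, c = 0;
    RecordAllocationSketch(&stack, &a);
    RecordAllocationSketch(&stack, &b);
    RecordAllocationSketch(&stack, &c);  // overflows, triggers the stand-in GC, then succeeds
    std::printf("all three allocations recorded\n");
    return 0;
  }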
+void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) {
+ DCHECK_LE(freed_bytes, static_cast<size_t>(num_bytes_allocated_));
+ num_bytes_allocated_ -= freed_bytes;
+
+ if (Runtime::Current()->HasStatsEnabled()) {
+ RuntimeStats* thread_stats = Thread::Current()->GetStats();
+ thread_stats->freed_objects += freed_objects;
+ thread_stats->freed_bytes += freed_bytes;
+
+ // TODO: Do this concurrently.
+ RuntimeStats* global_stats = Runtime::Current()->GetStats();
+ global_stats->freed_objects += freed_objects;
+ global_stats->freed_bytes += freed_bytes;
+ }
+}
+
+mirror::Object* Heap::TryToAllocate(Thread* self, space::AllocSpace* space, size_t alloc_size,
+ bool grow) {
+ // Should we try to use a CAS here and fix up num_bytes_allocated_ later with AllocationSize?
+ if (num_bytes_allocated_ + alloc_size > max_allowed_footprint_) {
+ // max_allowed_footprint_ <= growth_limit_ so it is safe to check in here.
+ if (num_bytes_allocated_ + alloc_size > growth_limit_) {
+ // Completely out of memory.
+ return NULL;
+ }
+ }
+
+ return space->Alloc(self, alloc_size);
+}
+
+mirror::Object* Heap::Allocate(Thread* self, space::AllocSpace* space, size_t alloc_size) {
+ // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
+ // done in the runnable state where suspension is expected.
+ DCHECK_EQ(self->GetState(), kRunnable);
+ self->AssertThreadSuspensionIsAllowable();
+
+ mirror::Object* ptr = TryToAllocate(self, space, alloc_size, false);
+ if (ptr != NULL) {
+ return ptr;
+ }
+
+ // The allocation failed. If the GC is running, block until it completes, and then retry the
+ // allocation.
+ collector::GcType last_gc = WaitForConcurrentGcToComplete(self);
+ if (last_gc != collector::kGcTypeNone) {
+ // A GC was in progress and we blocked, retry allocation now that memory has been freed.
+ ptr = TryToAllocate(self, space, alloc_size, false);
+ if (ptr != NULL) {
+ return ptr;
+ }
+ }
+
+ // Loop through our different Gc types and try to Gc until we get enough free memory.
+ for (size_t i = static_cast<size_t>(last_gc) + 1;
+ i < static_cast<size_t>(collector::kGcTypeMax); ++i) {
+ bool run_gc = false;
+ collector::GcType gc_type = static_cast<collector::GcType>(i);
+ switch (gc_type) {
+ case collector::kGcTypeSticky: {
+ const size_t alloc_space_size = alloc_space_->Size();
+ run_gc = alloc_space_size > min_alloc_space_size_for_sticky_gc_ &&
+ alloc_space_->Capacity() - alloc_space_size >= min_remaining_space_for_sticky_gc_;
+ break;
+ }
+ case collector::kGcTypePartial:
+ run_gc = have_zygote_space_;
+ break;
+ case collector::kGcTypeFull:
+ run_gc = true;
+ break;
+ default:
+ break;
+ }
+
+ if (run_gc) {
+ // If we actually ran a different type of Gc than requested, we can skip the index forwards.
+ collector::GcType gc_type_ran = CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
+ DCHECK_GE(static_cast<size_t>(gc_type_ran), i);
+ i = static_cast<size_t>(gc_type_ran);
+
+ // Did we free sufficient memory for the allocation to succeed?
+ ptr = TryToAllocate(self, space, alloc_size, false);
+ if (ptr != NULL) {
+ return ptr;
+ }
+ }
+ }
+
+ // Allocations have failed after GCs; this is an exceptional state.
+ // Try harder, growing the heap if necessary.
+ ptr = TryToAllocate(self, space, alloc_size, true);
+ if (ptr != NULL) {
+ return ptr;
+ }
+
+ // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
+ // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
+ // VM spec requires that all SoftReferences have been collected and cleared before throwing OOME.
+
+ // OLD-TODO: wait for the finalizers from the previous GC to finish
+ VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
+ << " allocation";
+
+ // We don't need a WaitForConcurrentGcToComplete here either.
+ CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, true);
+ return TryToAllocate(self, space, alloc_size, true);
+}
+
+void Heap::SetTargetHeapUtilization(float target) {
+ DCHECK_GT(target, 0.0f); // asserted in Java code
+ DCHECK_LT(target, 1.0f);
+ target_utilization_ = target;
+}
+
+size_t Heap::GetObjectsAllocated() const {
+ size_t total = 0;
+ typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+ for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
+ space::ContinuousSpace* space = *it;
+ if (space->IsDlMallocSpace()) {
+ total += space->AsDlMallocSpace()->GetObjectsAllocated();
+ }
+ }
+ typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
+ for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
+ space::DiscontinuousSpace* space = *it;
+ total += space->AsLargeObjectSpace()->GetObjectsAllocated();
+ }
+ return total;
+}
+
+size_t Heap::GetObjectsAllocatedEver() const {
+ size_t total = 0;
+ typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+ for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
+ space::ContinuousSpace* space = *it;
+ if (space->IsDlMallocSpace()) {
+ total += space->AsDlMallocSpace()->GetTotalObjectsAllocated();
+ }
+ }
+ typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
+ for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
+ space::DiscontinuousSpace* space = *it;
+ total += space->AsLargeObjectSpace()->GetTotalObjectsAllocated();
+ }
+ return total;
+}
+
+size_t Heap::GetBytesAllocatedEver() const {
+ size_t total = 0;
+ typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+ for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
+ space::ContinuousSpace* space = *it;
+ if (space->IsDlMallocSpace()) {
+ total += space->AsDlMallocSpace()->GetTotalBytesAllocated();
+ }
+ }
+ typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
+ for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
+ space::DiscontinuousSpace* space = *it;
+ total += space->AsLargeObjectSpace()->GetTotalBytesAllocated();
+ }
+ return total;
+}
+
+class InstanceCounter {
+ public:
+ InstanceCounter(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from, uint64_t* counts)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {
+ }
+
+ void operator()(const mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ for (size_t i = 0; i < classes_.size(); ++i) {
+ const mirror::Class* instance_class = o->GetClass();
+ if (use_is_assignable_from_) {
+ if (instance_class != NULL && classes_[i]->IsAssignableFrom(instance_class)) {
+ ++counts_[i];
+ }
+ } else {
+ if (instance_class == classes_[i]) {
+ ++counts_[i];
+ }
+ }
+ }
+ }
+
+ private:
+ const std::vector<mirror::Class*>& classes_;
+ bool use_is_assignable_from_;
+ uint64_t* const counts_;
+
+ DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
+};
+
+void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
+ uint64_t* counts) {
+ // We only want reachable instances, so do a GC. This also ensures that the alloc stack
+ // is empty, so the live bitmap is the only place we need to look.
+ Thread* self = Thread::Current();
+ self->TransitionFromRunnableToSuspended(kNative);
+ CollectGarbage(false);
+ self->TransitionFromSuspendedToRunnable();
+
+ InstanceCounter counter(classes, use_is_assignable_from, counts);
+ ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ GetLiveBitmap()->Visit(counter);
+}
+
+class InstanceCollector {
+ public:
+ InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : class_(c), max_count_(max_count), instances_(instances) {
+ }
+
+ void operator()(const mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const mirror::Class* instance_class = o->GetClass();
+ if (instance_class == class_) {
+ if (max_count_ == 0 || instances_.size() < max_count_) {
+ instances_.push_back(const_cast<mirror::Object*>(o));
+ }
+ }
+ }
+
+ private:
+ mirror::Class* class_;
+ uint32_t max_count_;
+ std::vector<mirror::Object*>& instances_;
+
+ DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
+};
+
+void Heap::GetInstances(mirror::Class* c, int32_t max_count,
+ std::vector<mirror::Object*>& instances) {
+ // We only want reachable instances, so do a GC. This also ensures that the alloc stack
+ // is empty, so the live bitmap is the only place we need to look.
+ Thread* self = Thread::Current();
+ self->TransitionFromRunnableToSuspended(kNative);
+ CollectGarbage(false);
+ self->TransitionFromSuspendedToRunnable();
+
+ InstanceCollector collector(c, max_count, instances);
+ ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ GetLiveBitmap()->Visit(collector);
+}
+
+class ReferringObjectsFinder {
+ public:
+ ReferringObjectsFinder(mirror::Object* object, int32_t max_count,
+ std::vector<mirror::Object*>& referring_objects)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : object_(object), max_count_(max_count), referring_objects_(referring_objects) {
+ }
+
+ // For bitmap Visit.
+ // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
+ // annotalysis on visitors.
+ void operator()(const mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
+ collector::MarkSweep::VisitObjectReferences(o, *this);
+ }
+
+ // For MarkSweep::VisitObjectReferences.
+ void operator ()(const mirror::Object* referrer, const mirror::Object* object,
+ const MemberOffset&, bool) const {
+ if (object == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
+ referring_objects_.push_back(const_cast<mirror::Object*>(referrer));
+ }
+ }
+
+ private:
+ mirror::Object* object_;
+ uint32_t max_count_;
+ std::vector<mirror::Object*>& referring_objects_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
+};
+
+void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
+ std::vector<mirror::Object*>& referring_objects) {
+ // We only want reachable instances, so do a GC. This also ensures that the alloc stack
+ // is empty, so the live bitmap is the only place we need to look.
+ Thread* self = Thread::Current();
+ self->TransitionFromRunnableToSuspended(kNative);
+ CollectGarbage(false);
+ self->TransitionFromSuspendedToRunnable();
+
+ ReferringObjectsFinder finder(o, max_count, referring_objects);
+ ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ GetLiveBitmap()->Visit(finder);
+}
+
+void Heap::CollectGarbage(bool clear_soft_references) {
+ // Even if we waited for a GC we still need to do another GC since weaks allocated during the
+ // last GC will not necessarily have been cleared.
+ Thread* self = Thread::Current();
+ WaitForConcurrentGcToComplete(self);
+ CollectGarbageInternal(collector::kGcTypeFull, kGcCauseExplicit, clear_soft_references);
+}
+
+void Heap::PreZygoteFork() {
+ static Mutex zygote_creation_lock_("zygote creation lock", kZygoteCreationLock);
+ // Do this before acquiring the zygote creation lock so that we don't get lock order violations.
+ CollectGarbage(false);
+ Thread* self = Thread::Current();
+ MutexLock mu(self, zygote_creation_lock_);
+
+ // Try to see if we have any Zygote spaces.
+ if (have_zygote_space_) {
+ return;
+ }
+
+ VLOG(heap) << "Starting PreZygoteFork with alloc space size " << PrettySize(alloc_space_->Size());
+
+ {
+ // Flush the alloc stack.
+ WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ FlushAllocStack();
+ }
+
+ // Turns the current alloc space into a Zygote space and obtain the new alloc space composed
+ // of the remaining available heap memory.
+ space::DlMallocSpace* zygote_space = alloc_space_;
+ alloc_space_ = zygote_space->CreateZygoteSpace();
+ alloc_space_->SetFootprintLimit(alloc_space_->Capacity());
+
+ // Change the GC retention policy of the zygote space to only collect when full.
+ zygote_space->SetGcRetentionPolicy(space::kGcRetentionPolicyFullCollect);
+ AddContinuousSpace(alloc_space_);
+ have_zygote_space_ = true;
+
+ // Reset the cumulative loggers since we now have a few additional timing phases.
+ // TODO: C++0x
+ typedef std::vector<collector::MarkSweep*>::const_iterator It;
+ for (It it = mark_sweep_collectors_.begin(), end = mark_sweep_collectors_.end();
+ it != end; ++it) {
+ (*it)->ResetCumulativeStatistics();
+ }
+}
+
+void Heap::FlushAllocStack() {
+ MarkAllocStack(alloc_space_->GetLiveBitmap(), large_object_space_->GetLiveObjects(),
+ allocation_stack_.get());
+ allocation_stack_->Reset();
+}
+
+void Heap::MarkAllocStack(accounting::SpaceBitmap* bitmap, accounting::SpaceSetMap* large_objects,
+ accounting::ObjectStack* stack) {
+ mirror::Object** limit = stack->End();
+ for (mirror::Object** it = stack->Begin(); it != limit; ++it) {
+ const mirror::Object* obj = *it;
+ DCHECK(obj != NULL);
+ if (LIKELY(bitmap->HasAddress(obj))) {
+ bitmap->Set(obj);
+ } else {
+ large_objects->Set(obj);
+ }
+ }
+}
+
+void Heap::UnMarkAllocStack(accounting::SpaceBitmap* bitmap, accounting::SpaceSetMap* large_objects,
+ accounting::ObjectStack* stack) {
+ mirror::Object** limit = stack->End();
+ for (mirror::Object** it = stack->Begin(); it != limit; ++it) {
+ const mirror::Object* obj = *it;
+ DCHECK(obj != NULL);
+ if (LIKELY(bitmap->HasAddress(obj))) {
+ bitmap->Clear(obj);
+ } else {
+ large_objects->Clear(obj);
+ }
+ }
+}
+
+collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCause gc_cause,
+ bool clear_soft_references) {
+ Thread* self = Thread::Current();
+ ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
+ Locks::mutator_lock_->AssertNotHeld(self);
+
+ if (self->IsHandlingStackOverflow()) {
+ LOG(WARNING) << "Performing GC on a thread that is handling a stack overflow.";
+ }
+
+ // Ensure there is only one GC at a time.
+ bool start_collect = false;
+ while (!start_collect) {
+ {
+ MutexLock mu(self, *gc_complete_lock_);
+ if (!is_gc_running_) {
+ is_gc_running_ = true;
+ start_collect = true;
+ }
+ }
+ if (!start_collect) {
+ WaitForConcurrentGcToComplete(self);
+ // TODO: if another thread beat this one to do the GC, perhaps we should just return here?
+ // Not doing so at the moment to ensure soft references are cleared.
+ }
+ }
+ gc_complete_lock_->AssertNotHeld(self);
+
+ if (gc_cause == kGcCauseForAlloc && Runtime::Current()->HasStatsEnabled()) {
+ ++Runtime::Current()->GetStats()->gc_for_alloc_count;
+ ++Thread::Current()->GetStats()->gc_for_alloc_count;
+ }
+
+ // We need to do partial GCs every now and then to avoid the heap growing too much and
+ // fragmenting.
+ // TODO: if sticky GCs are failing to free memory then we should lower the
+ // sticky_to_partial_gc_ratio_; if they are successful we can increase it.
+ if (gc_type == collector::kGcTypeSticky) {
+ ++sticky_gc_count_;
+ if (sticky_gc_count_ >= sticky_to_partial_gc_ratio_) {
+ gc_type = have_zygote_space_ ? collector::kGcTypePartial : collector::kGcTypeFull;
+ sticky_gc_count_ = 0;
+ }
+ } else {
+ sticky_gc_count_ = 0;
+ }
+
+ uint64_t gc_start_time_ns = NanoTime();
+ uint64_t gc_start_size = GetBytesAllocated();
+ // Approximate allocation rate in bytes / second.
+ if (UNLIKELY(gc_start_time_ns == last_gc_time_ns_)) {
+ LOG(WARNING) << "Timers are broken (gc_start_time == last_gc_time_).";
+ }
+ uint64_t ms_delta = NsToMs(gc_start_time_ns - last_gc_time_ns_);
+ if (ms_delta != 0) {
+ allocation_rate_ = ((gc_start_size - last_gc_size_) * 1000) / ms_delta;
+ VLOG(heap) << "Allocation rate: " << PrettySize(allocation_rate_) << "/s";
+ }
+
+ DCHECK_LT(gc_type, collector::kGcTypeMax);
+ DCHECK_NE(gc_type, collector::kGcTypeNone);
+ collector::MarkSweep* collector = NULL;
+ typedef std::vector<collector::MarkSweep*>::iterator It;
+ for (It it = mark_sweep_collectors_.begin(), end = mark_sweep_collectors_.end();
+ it != end; ++it) {
+ collector::MarkSweep* cur_collector = *it;
+ if (cur_collector->IsConcurrent() == concurrent_gc_ && cur_collector->GetGcType() == gc_type) {
+ collector = cur_collector;
+ break;
+ }
+ }
+ CHECK(collector != NULL)
+ << "Could not find garbage collector with concurrent=" << concurrent_gc_
+ << " and type=" << gc_type;
+ collector->clear_soft_references_ = clear_soft_references;
+ collector->Run();
+ total_objects_freed_ever_ += collector->GetFreedObjects();
+ total_bytes_freed_ever_ += collector->GetFreedBytes();
+
+ const size_t duration = collector->GetDurationNs();
+ std::vector<uint64_t> pauses = collector->GetPauseTimes();
+ bool was_slow = duration > kSlowGcThreshold ||
+ (gc_cause == kGcCauseForAlloc && duration > kLongGcPauseThreshold);
+ for (size_t i = 0; i < pauses.size(); ++i) {
+ if (pauses[i] > kLongGcPauseThreshold) {
+ was_slow = true;
+ }
+ }
+
+ if (was_slow) {
+ const size_t percent_free = GetPercentFree();
+ const size_t current_heap_size = GetBytesAllocated();
+ const size_t total_memory = GetTotalMemory();
+ std::ostringstream pause_string;
+ for (size_t i = 0; i < pauses.size(); ++i) {
+ pause_string << PrettyDuration((pauses[i] / 1000) * 1000)
+ << ((i != pauses.size() - 1) ? ", " : "");
+ }
+ LOG(INFO) << gc_cause << " " << collector->GetName()
+ << "GC freed " << PrettySize(collector->GetFreedBytes()) << ", "
+ << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
+ << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
+ << " total " << PrettyDuration((duration / 1000) * 1000);
+ if (VLOG_IS_ON(heap)) {
+ LOG(INFO) << Dumpable<base::NewTimingLogger>(collector->GetTimings());
+ }
+ }
+
+ {
+ MutexLock mu(self, *gc_complete_lock_);
+ is_gc_running_ = false;
+ last_gc_type_ = gc_type;
+ // Wake anyone who may have been waiting for the GC to complete.
+ gc_complete_cond_->Broadcast(self);
+ }
+ // Inform DDMS that a GC completed.
+ Dbg::GcDidFinish();
+ return gc_type;
+}
+
+void Heap::UpdateAndMarkModUnion(collector::MarkSweep* mark_sweep, base::NewTimingLogger& timings,
+ collector::GcType gc_type) {
+ if (gc_type == collector::kGcTypeSticky) {
+ // Don't need to do anything for mod union table in this case since we are only scanning dirty
+ // cards.
+ return;
+ }
+
+ // Update zygote mod union table.
+ if (gc_type == collector::kGcTypePartial) {
+ timings.NewSplit("UpdateZygoteModUnionTable");
+ zygote_mod_union_table_->Update();
+
+ timings.NewSplit("ZygoteMarkReferences");
+ zygote_mod_union_table_->MarkReferences(mark_sweep);
+ }
+
+ // Processes the cards we cleared earlier and adds their objects into the mod-union table.
+ timings.NewSplit("UpdateModUnionTable");
+ image_mod_union_table_->Update();
+
+ // Scans all objects in the mod-union table.
+ timings.NewSplit("MarkImageToAllocSpaceReferences");
+ image_mod_union_table_->MarkReferences(mark_sweep);
+}
+
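The calls above are where references out of the spaces that are not being collected get re-marked without rescanning those spaces in full: Update() consumes the cards that were dirtied in an immune space, and MarkReferences() revisits only what the table remembered. The sketch below is a heavily simplified illustration of that split; the flat card array and all names are invented for the example and do not reflect the real table layouts.

  #include <cstddef>
  #include <cstdio>
  #include <set>
  #include <vector>

  // Illustrative only: remember which immune-space cards were written, then rescan just those.
  class ModUnionTableSketch {
   public:
    // Update(): consume the dirty bits, transferring ownership of the information to the table.
    void Update(std::vector<bool>* dirty_cards) {
      for (size_t i = 0; i < dirty_cards->size(); ++i) {
        if ((*dirty_cards)[i]) {
          remembered_cards_.insert(i);
          (*dirty_cards)[i] = false;  // the card is clean again from the card table's view
        }
      }
    }
    // MarkReferences(): a real table marks through the references found on these cards.
    void MarkReferences() const {
      for (std::set<size_t>::const_iterator it = remembered_cards_.begin();
           it != remembered_cards_.end(); ++it) {
        std::printf("re-scan immune-space card %u\n", static_cast<unsigned>(*it));
      }
    }
   private:
    std::set<size_t> remembered_cards_;
  };

  int main() {
    std::vector<bool> dirty_cards(8, false);
    dirty_cards[3] = true;  // some image-space object had a reference field written
    ModUnionTableSketch table;
    table.Update(&dirty_cards);
    table.MarkReferences();  // only card 3 is revisited
    return 0;
  }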
+static void RootMatchesObjectVisitor(const mirror::Object* root, void* arg) {
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(arg);
+ if (root == obj) {
+ LOG(INFO) << "Object " << obj << " is a root";
+ }
+}
+
+class ScanVisitor {
+ public:
+ void operator ()(const mirror::Object* obj) const {
+ LOG(INFO) << "Would have rescanned object " << obj;
+ }
+};
+
+// Verify a reference from an object.
+class VerifyReferenceVisitor {
+ public:
+ VerifyReferenceVisitor(Heap* heap)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
+ : heap_(heap), failed_(false) {
+ }
+
+ bool Failed() const {
+ return failed_;
+ }
+
+ // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for smarter
+ // analysis on visitors.
+ void operator ()(const mirror::Object* obj, const mirror::Object* ref,
+ const MemberOffset& offset, bool /* is_static */) const
+ NO_THREAD_SAFETY_ANALYSIS {
+ // Verify that the reference is live.
+ if (UNLIKELY(ref != NULL && !IsLive(ref))) {
+ accounting::CardTable* card_table = heap_->GetCardTable();
+ accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
+ accounting::ObjectStack* live_stack = heap_->live_stack_.get();
+
+ if (obj != NULL) {
+ byte* card_addr = card_table->CardFromAddr(obj);
+ LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset " << offset
+ << "\nIsDirty = " << (*card_addr == accounting::CardTable::kCardDirty)
+ << "\nObj type " << PrettyTypeOf(obj)
+ << "\nRef type " << PrettyTypeOf(ref);
+ card_table->CheckAddrIsInCardTable(reinterpret_cast<const byte*>(obj));
+ void* cover_begin = card_table->AddrFromCard(card_addr);
+ void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
+ accounting::CardTable::kCardSize);
+ LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
+ << "-" << cover_end;
+ accounting::SpaceBitmap* bitmap = heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
+
+ // Print out how the object is live.
+ if (bitmap != NULL && bitmap->Test(obj)) {
+ LOG(ERROR) << "Object " << obj << " found in live bitmap";
+ }
+ if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
+ LOG(ERROR) << "Object " << obj << " found in allocation stack";
+ }
+ if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
+ LOG(ERROR) << "Object " << obj << " found in live stack";
+ }
+ // Attempt to see if the card table missed the reference.
+ ScanVisitor scan_visitor;
+ byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
+ card_table->Scan(bitmap, byte_cover_begin,
+ byte_cover_begin + accounting::CardTable::kCardSize,
+ scan_visitor, VoidFunctor());
+
+ // Search to see if any of the roots reference our object.
+ void* arg = const_cast<void*>(reinterpret_cast<const void*>(obj));
+ Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false);
+
+ // Search to see if any of the roots reference our reference.
+ arg = const_cast<void*>(reinterpret_cast<const void*>(ref));
+ Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false);
+ } else {
+ LOG(ERROR) << "Root references dead object " << ref << "\nRef type " << PrettyTypeOf(ref);
+ }
+ if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
+ LOG(ERROR) << "Reference " << ref << " found in allocation stack!";
+ }
+ if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
+ LOG(ERROR) << "Reference " << ref << " found in live stack!";
+ }
+ heap_->image_mod_union_table_->Dump(LOG(ERROR) << "Image mod-union table: ");
+ heap_->zygote_mod_union_table_->Dump(LOG(ERROR) << "Zygote mod-union table: ");
+ failed_ = true;
+ }
+ }
+
+ bool IsLive(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
+ return heap_->IsLiveObjectLocked(obj);
+ }
+
+ static void VerifyRoots(const mirror::Object* root, void* arg) {
+ VerifyReferenceVisitor* visitor = reinterpret_cast<VerifyReferenceVisitor*>(arg);
+ (*visitor)(NULL, root, MemberOffset(0), true);
+ }
+
+ private:
+ Heap* const heap_;
+ mutable bool failed_;
+};
+
+// Verify all references within an object, for use with HeapBitmap::Visit.
+class VerifyObjectVisitor {
+ public:
+ VerifyObjectVisitor(Heap* heap) : heap_(heap), failed_(false) {
+ }
+
+ void operator ()(const mirror::Object* obj) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ // Note: we are verifying the references in obj but not obj itself, since obj must be live or
+ // we would not have found it in the live bitmap.
+ VerifyReferenceVisitor visitor(heap_);
+ collector::MarkSweep::VisitObjectReferences(obj, visitor);
+ failed_ = failed_ || visitor.Failed();
+ }
+
+ bool Failed() const {
+ return failed_;
+ }
+
+ private:
+ Heap* const heap_;
+ mutable bool failed_;
+};
+
+// Must do this with mutators suspended since we are directly accessing the allocation stacks.
+bool Heap::VerifyHeapReferences() {
+ Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
+ // Let's sort our allocation stacks so that we can efficiently binary search them.
+ allocation_stack_->Sort();
+ live_stack_->Sort();
+ // Perform the verification.
+ VerifyObjectVisitor visitor(this);
+ Runtime::Current()->VisitRoots(VerifyReferenceVisitor::VerifyRoots, &visitor, false, false);
+ GetLiveBitmap()->Visit(visitor);
+ // We don't want to verify the objects in the allocation stack since they themselves may be
+ // pointing to dead objects if they are not reachable.
+ if (visitor.Failed()) {
+ DumpSpaces();
+ return false;
+ }
+ return true;
+}
+
+class VerifyReferenceCardVisitor {
+ public:
+ VerifyReferenceCardVisitor(Heap* heap, bool* failed)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
+ Locks::heap_bitmap_lock_)
+ : heap_(heap), failed_(failed) {
+ }
+
+ // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
+ // annotalysis on visitors.
+ void operator ()(const mirror::Object* obj, const mirror::Object* ref, const MemberOffset& offset,
+ bool is_static) const NO_THREAD_SAFETY_ANALYSIS {
+ // Filter out class references since changing an object's class does not mark the card as dirty.
+ // Also handles large objects, since the only reference they hold is a class reference.
+ if (ref != NULL && !ref->IsClass()) {
+ accounting::CardTable* card_table = heap_->GetCardTable();
+ // Any object that references something in the live stack (other than its class) must be on a
+ // dirty card; if the card is not dirty, report a missing card mark below.
+ if (!card_table->AddrIsInCardTable(obj)) {
+ LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
+ *failed_ = true;
+ } else if (!card_table->IsDirty(obj)) {
+        // The card should be either kCardDirty if it got re-dirtied after we aged it, or
+        // kCardDirty - 1 if it didn't get touched since we aged it.
+ accounting::ObjectStack* live_stack = heap_->live_stack_.get();
+ if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
+ if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
+ LOG(ERROR) << "Object " << obj << " found in live stack";
+ }
+ if (heap_->GetLiveBitmap()->Test(obj)) {
+ LOG(ERROR) << "Object " << obj << " found in live bitmap";
+ }
+ LOG(ERROR) << "Object " << obj << " " << PrettyTypeOf(obj)
+ << " references " << ref << " " << PrettyTypeOf(ref) << " in live stack";
+
+ // Print which field of the object is dead.
+ if (!obj->IsObjectArray()) {
+ const mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
+ CHECK(klass != NULL);
+ const mirror::ObjectArray<mirror::Field>* fields = is_static ? klass->GetSFields()
+ : klass->GetIFields();
+ CHECK(fields != NULL);
+ for (int32_t i = 0; i < fields->GetLength(); ++i) {
+ const mirror::Field* cur = fields->Get(i);
+ if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
+ LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
+ << PrettyField(cur);
+ break;
+ }
+ }
+ } else {
+ const mirror::ObjectArray<mirror::Object>* object_array =
+ obj->AsObjectArray<mirror::Object>();
+ for (int32_t i = 0; i < object_array->GetLength(); ++i) {
+ if (object_array->Get(i) == ref) {
+ LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
+ }
+ }
+ }
+
+ *failed_ = true;
+ }
+ }
+ }
+ }
+
+ private:
+ Heap* const heap_;
+ bool* const failed_;
+};
+
+class VerifyLiveStackReferences {
+ public:
+ VerifyLiveStackReferences(Heap* heap)
+ : heap_(heap),
+        failed_(false) {
+  }
+
+ void operator ()(const mirror::Object* obj) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
+ collector::MarkSweep::VisitObjectReferences(obj, visitor);
+ }
+
+ bool Failed() const {
+ return failed_;
+ }
+
+ private:
+ Heap* const heap_;
+ bool failed_;
+};
+
+bool Heap::VerifyMissingCardMarks() {
+ Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
+
+ // We need to sort the live stack since we binary search it.
+ live_stack_->Sort();
+ VerifyLiveStackReferences visitor(this);
+ GetLiveBitmap()->Visit(visitor);
+
+ // We can verify objects in the live stack since none of these should reference dead objects.
+ for (mirror::Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
+ visitor(*it);
+ }
+
+ if (visitor.Failed()) {
+ DumpSpaces();
+ return false;
+ }
+ return true;
+}
+
+void Heap::SwapStacks() {
+ allocation_stack_.swap(live_stack_);
+
+ // Sort the live stack so that we can quickly binary search it later.
+ if (verify_object_mode_ > kNoHeapVerification) {
+ live_stack_->Sort();
+ }
+}
+
+void Heap::ProcessCards(base::NewTimingLogger& timings) {
+ // Clear cards and keep track of cards cleared in the mod-union table.
+ typedef std::vector<space::ContinuousSpace*>::iterator It;
+ for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
+ space::ContinuousSpace* space = *it;
+ if (space->IsImageSpace()) {
+ timings.NewSplit("ModUnionClearCards");
+ image_mod_union_table_->ClearCards(space);
+ } else if (space->IsZygoteSpace()) {
+ timings.NewSplit("ZygoteModUnionClearCards");
+ zygote_mod_union_table_->ClearCards(space);
+ } else {
+ // No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards
+ // were dirty before the GC started.
+ timings.NewSplit("AllocSpaceClearCards");
+ card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), VoidFunctor());
+ }
+ }
+}
+
+void Heap::PreGcVerification(collector::GarbageCollector* gc) {
+ ThreadList* thread_list = Runtime::Current()->GetThreadList();
+ Thread* self = Thread::Current();
+
+ if (verify_pre_gc_heap_) {
+ thread_list->SuspendAll();
+ {
+ ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ if (!VerifyHeapReferences()) {
+ LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed";
+ }
+ }
+ thread_list->ResumeAll();
+ }
+
+ // Check that all objects which reference things in the live stack are on dirty cards.
+ if (verify_missing_card_marks_) {
+ thread_list->SuspendAll();
+ {
+ ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ SwapStacks();
+ // Sort the live stack so that we can quickly binary search it later.
+ if (!VerifyMissingCardMarks()) {
+ LOG(FATAL) << "Pre " << gc->GetName() << " missing card mark verification failed";
+ }
+ SwapStacks();
+ }
+ thread_list->ResumeAll();
+ }
+
+ if (verify_mod_union_table_) {
+ thread_list->SuspendAll();
+ ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
+ zygote_mod_union_table_->Update();
+ zygote_mod_union_table_->Verify();
+ image_mod_union_table_->Update();
+ image_mod_union_table_->Verify();
+ thread_list->ResumeAll();
+ }
+}
+
+void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
+ ThreadList* thread_list = Runtime::Current()->GetThreadList();
+
+  // Called before sweeping occurs since we want to make sure we are not going to reclaim any
+  // reachable objects.
+ if (verify_post_gc_heap_) {
+ Thread* self = Thread::Current();
+ CHECK_NE(self->GetState(), kRunnable);
+ Locks::mutator_lock_->SharedUnlock(self);
+ thread_list->SuspendAll();
+ {
+ WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ // Swapping bound bitmaps does nothing.
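+      // Swap so the mark bitmap (objects this GC marked live) acts as the live bitmap during
+      // verification; swapped back below before sweeping proceeds.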
+ gc->SwapBitmaps();
+ if (!VerifyHeapReferences()) {
+ LOG(FATAL) << "Post " << gc->GetName() << "GC verification failed";
+ }
+ gc->SwapBitmaps();
+ }
+ thread_list->ResumeAll();
+ Locks::mutator_lock_->SharedLock(self);
+ }
+}
+
+void Heap::PostGcVerification(collector::GarbageCollector* gc) {
+ Thread* self = Thread::Current();
+
+ if (verify_system_weaks_) {
+ ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
+ mark_sweep->VerifySystemWeaks();
+ }
+}
+
+collector::GcType Heap::WaitForConcurrentGcToComplete(Thread* self) {
+ collector::GcType last_gc_type = collector::kGcTypeNone;
+ if (concurrent_gc_) {
+ bool do_wait;
+ uint64_t wait_start = NanoTime();
+ {
+ // Check if GC is running holding gc_complete_lock_.
+ MutexLock mu(self, *gc_complete_lock_);
+ do_wait = is_gc_running_;
+ }
+ if (do_wait) {
+ uint64_t wait_time;
+      // We must wait; change the thread state, then sleep on gc_complete_cond_.
+ ScopedThreadStateChange tsc(Thread::Current(), kWaitingForGcToComplete);
+ {
+ MutexLock mu(self, *gc_complete_lock_);
+ while (is_gc_running_) {
+ gc_complete_cond_->Wait(self);
+ }
+ last_gc_type = last_gc_type_;
+        wait_time = NanoTime() - wait_start;
+ total_wait_time_ += wait_time;
+ }
+ if (wait_time > kLongGcPauseThreshold) {
+ LOG(INFO) << "WaitForConcurrentGcToComplete blocked for " << PrettyDuration(wait_time);
+ }
+ }
+ }
+ return last_gc_type;
+}
+
+void Heap::DumpForSigQuit(std::ostream& os) {
+ os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
+ << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
+ DumpGcPerformanceInfo(os);
+}
+
+size_t Heap::GetPercentFree() {
+ return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / GetTotalMemory());
+}
+
+void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
+ if (max_allowed_footprint > GetMaxMemory()) {
+ VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
+ << PrettySize(GetMaxMemory());
+ max_allowed_footprint = GetMaxMemory();
+ }
+ max_allowed_footprint_ = max_allowed_footprint;
+}
+
+void Heap::GrowForUtilization(uint64_t gc_duration) {
+ // We know what our utilization is at this moment.
+ // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
+ const size_t bytes_allocated = GetBytesAllocated();
+ last_gc_size_ = bytes_allocated;
+ last_gc_time_ns_ = NanoTime();
+
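+  // Grow the footprint to hit the target utilization, but clamp it so that free space stays
+  // within [min_free_, max_free_] of the current bytes allocated.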
+ size_t target_size = bytes_allocated / GetTargetHeapUtilization();
+ if (target_size > bytes_allocated + max_free_) {
+ target_size = bytes_allocated + max_free_;
+ } else if (target_size < bytes_allocated + min_free_) {
+ target_size = bytes_allocated + min_free_;
+ }
+
+ SetIdealFootprint(target_size);
+
+ // Calculate when to perform the next ConcurrentGC.
+ if (concurrent_gc_) {
+ // Calculate the estimated GC duration.
+ double gc_duration_seconds = NsToMs(gc_duration) / 1000.0;
+ // Estimate how many remaining bytes we will have when we need to start the next GC.
+ size_t remaining_bytes = allocation_rate_ * gc_duration_seconds;
+ remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
+ if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
+      // An unlikely case: the estimated allocation rate implies that we would exceed the
+      // application's entire footprint before the next GC completes. Schedule another GC
+      // straight away.
+ concurrent_start_bytes_ = bytes_allocated;
+ } else {
+ // Start a concurrent GC when we get close to the estimated remaining bytes. When the
+ // allocation rate is very high, remaining_bytes could tell us that we should start a GC
+ // right away.
+ concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes, bytes_allocated);
+ }
+ DCHECK_LE(concurrent_start_bytes_, max_allowed_footprint_);
+ DCHECK_LE(max_allowed_footprint_, growth_limit_);
+ }
+}
+
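+// Lifts the growth limit to the full capacity; backs dalvik.system.VMRuntime.clearGrowthLimit
+// (used by apps that request a large heap).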
+void Heap::ClearGrowthLimit() {
+ growth_limit_ = capacity_;
+ alloc_space_->ClearGrowthLimit();
+}
+
+void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset,
+ MemberOffset reference_queue_offset,
+ MemberOffset reference_queueNext_offset,
+ MemberOffset reference_pendingNext_offset,
+ MemberOffset finalizer_reference_zombie_offset) {
+ reference_referent_offset_ = reference_referent_offset;
+ reference_queue_offset_ = reference_queue_offset;
+ reference_queueNext_offset_ = reference_queueNext_offset;
+ reference_pendingNext_offset_ = reference_pendingNext_offset;
+ finalizer_reference_zombie_offset_ = finalizer_reference_zombie_offset;
+ CHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
+ CHECK_NE(reference_queue_offset_.Uint32Value(), 0U);
+ CHECK_NE(reference_queueNext_offset_.Uint32Value(), 0U);
+ CHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
+ CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
+}
+
+mirror::Object* Heap::GetReferenceReferent(mirror::Object* reference) {
+ DCHECK(reference != NULL);
+ DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
+ return reference->GetFieldObject<mirror::Object*>(reference_referent_offset_, true);
+}
+
+void Heap::ClearReferenceReferent(mirror::Object* reference) {
+ DCHECK(reference != NULL);
+ DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
+ reference->SetFieldObject(reference_referent_offset_, NULL, true);
+}
+
+// Returns true if the reference object has not yet been enqueued.
+bool Heap::IsEnqueuable(const mirror::Object* ref) {
+ DCHECK(ref != NULL);
+ const mirror::Object* queue =
+ ref->GetFieldObject<mirror::Object*>(reference_queue_offset_, false);
+ const mirror::Object* queue_next =
+ ref->GetFieldObject<mirror::Object*>(reference_queueNext_offset_, false);
+ return (queue != NULL) && (queue_next == NULL);
+}
+
+void Heap::EnqueueReference(mirror::Object* ref, mirror::Object** cleared_reference_list) {
+ DCHECK(ref != NULL);
+ CHECK(ref->GetFieldObject<mirror::Object*>(reference_queue_offset_, false) != NULL);
+ CHECK(ref->GetFieldObject<mirror::Object*>(reference_queueNext_offset_, false) == NULL);
+ EnqueuePendingReference(ref, cleared_reference_list);
+}
+
+void Heap::EnqueuePendingReference(mirror::Object* ref, mirror::Object** list) {
+ DCHECK(ref != NULL);
+ DCHECK(list != NULL);
+
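+  // Pending references form a circular singly-linked list through the pendingNext field: *list
+  // points at the tail element and the head is reachable via tail->pendingNext.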
+ // TODO: Remove this lock, use atomic stacks for storing references.
+ MutexLock mu(Thread::Current(), *reference_queue_lock_);
+ if (*list == NULL) {
+ ref->SetFieldObject(reference_pendingNext_offset_, ref, false);
+ *list = ref;
+ } else {
+ mirror::Object* head =
+ (*list)->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_, false);
+ ref->SetFieldObject(reference_pendingNext_offset_, head, false);
+ (*list)->SetFieldObject(reference_pendingNext_offset_, ref, false);
+ }
+}
+
+mirror::Object* Heap::DequeuePendingReference(mirror::Object** list) {
+ DCHECK(list != NULL);
+ DCHECK(*list != NULL);
+ mirror::Object* head = (*list)->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_,
+ false);
+ mirror::Object* ref;
+
+ // Note: the following code is thread-safe because it is only called from ProcessReferences which
+ // is single threaded.
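+  // If the tail's pendingNext points back at the tail itself, the list has a single element.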
+ if (*list == head) {
+ ref = *list;
+ *list = NULL;
+ } else {
+ mirror::Object* next = head->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_,
+ false);
+ (*list)->SetFieldObject(reference_pendingNext_offset_, next, false);
+ ref = head;
+ }
+ ref->SetFieldObject(reference_pendingNext_offset_, NULL, false);
+ return ref;
+}
+
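+// Calls java.lang.ref.FinalizerReference.add(object) so that a FinalizerReference is created and
+// tracked for the given object.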
+void Heap::AddFinalizerReference(Thread* self, mirror::Object* object) {
+ ScopedObjectAccess soa(self);
+ JValue result;
+ ArgArray arg_array(NULL, 0);
+ arg_array.Append(reinterpret_cast<uint32_t>(object));
+ soa.DecodeMethod(WellKnownClasses::java_lang_ref_FinalizerReference_add)->Invoke(self,
+ arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V');
+}
+
+void Heap::EnqueueClearedReferences(mirror::Object** cleared) {
+ DCHECK(cleared != NULL);
+ if (*cleared != NULL) {
+ // When a runtime isn't started there are no reference queues to care about so ignore.
+ if (LIKELY(Runtime::Current()->IsStarted())) {
+ ScopedObjectAccess soa(Thread::Current());
+ JValue result;
+ ArgArray arg_array(NULL, 0);
+ arg_array.Append(reinterpret_cast<uint32_t>(*cleared));
+ soa.DecodeMethod(WellKnownClasses::java_lang_ref_ReferenceQueue_add)->Invoke(soa.Self(),
+ arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V');
+ }
+ *cleared = NULL;
+ }
+}
+
+void Heap::RequestConcurrentGC(Thread* self) {
+ // Make sure that we can do a concurrent GC.
+ Runtime* runtime = Runtime::Current();
+ DCHECK(concurrent_gc_);
+ if (runtime == NULL || !runtime->IsFinishedStarting() ||
+ !runtime->IsConcurrentGcEnabled()) {
+ return;
+ }
+ {
+ MutexLock mu(self, *Locks::runtime_shutdown_lock_);
+ if (runtime->IsShuttingDown()) {
+ return;
+ }
+ }
+ if (self->IsHandlingStackOverflow()) {
+ return;
+ }
+
+ JNIEnv* env = self->GetJniEnv();
+ DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
+ DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != NULL);
+ env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
+ WellKnownClasses::java_lang_Daemons_requestGC);
+ CHECK(!env->ExceptionCheck());
+}
+
+void Heap::ConcurrentGC(Thread* self) {
+ {
+ MutexLock mu(self, *Locks::runtime_shutdown_lock_);
+ if (Runtime::Current()->IsShuttingDown()) {
+ return;
+ }
+ }
+
+ // Wait for any GCs currently running to finish.
+ if (WaitForConcurrentGcToComplete(self) == collector::kGcTypeNone) {
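+    // Prefer the cheaper sticky GC while the alloc space is large enough to make it worthwhile;
+    // otherwise fall back to a partial GC.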
+ if (alloc_space_->Size() > min_alloc_space_size_for_sticky_gc_) {
+ CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseBackground, false);
+ } else {
+ CollectGarbageInternal(collector::kGcTypePartial, kGcCauseBackground, false);
+ }
+ }
+}
+
+void Heap::RequestHeapTrim() {
+ // GC completed and now we must decide whether to request a heap trim (advising pages back to the
+ // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
+ // a space it will hold its lock and can become a cause of jank.
+  // Note: the large object space trims itself, and the Zygote space was trimmed at fork time and
+  // is unchanging since then.
+
+ // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
+ // because that only marks object heads, so a large array looks like lots of empty space. We
+  // don't just ask dlmalloc to trim all the time, because the cost of an _attempted_ trim is
+  // proportional to utilization (which is probably inversely proportional to how much benefit we
+  // can expect).
+ // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
+ // not how much use we're making of those pages.
+ uint64_t ms_time = MilliTime();
+ float utilization =
+ static_cast<float>(alloc_space_->GetBytesAllocated()) / alloc_space_->Size();
+ if ((utilization > 0.75f) || ((ms_time - last_trim_time_ms_) < 2 * 1000)) {
+ // Don't bother trimming the alloc space if it's more than 75% utilized, or if a
+ // heap trim occurred in the last two seconds.
+ return;
+ }
+
+ Thread* self = Thread::Current();
+ {
+ MutexLock mu(self, *Locks::runtime_shutdown_lock_);
+ Runtime* runtime = Runtime::Current();
+ if (runtime == NULL || !runtime->IsFinishedStarting() || runtime->IsShuttingDown()) {
+      // Heap trimming isn't supported without a Java runtime or Daemons (such as at dex2oat time).
+ // Also: we do not wish to start a heap trim if the runtime is shutting down (a racy check
+ // as we don't hold the lock while requesting the trim).
+ return;
+ }
+ }
+
+ SchedPolicy policy;
+ get_sched_policy(self->GetTid(), &policy);
+ if (policy == SP_FOREGROUND || policy == SP_AUDIO_APP) {
+ // Don't trim the heap if we are a foreground or audio app.
+ return;
+ }
+
+ last_trim_time_ms_ = ms_time;
+ JNIEnv* env = self->GetJniEnv();
+ DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
+ DCHECK(WellKnownClasses::java_lang_Daemons_requestHeapTrim != NULL);
+ env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
+ WellKnownClasses::java_lang_Daemons_requestHeapTrim);
+ CHECK(!env->ExceptionCheck());
+}
+
+size_t Heap::Trim() {
+ // Handle a requested heap trim on a thread outside of the main GC thread.
+ return alloc_space_->Trim();
+}
+
+} // namespace gc
+} // namespace art
diff --git a/src/gc/heap.h b/src/gc/heap.h
new file mode 100644
index 0000000..d86c7dc
--- /dev/null
+++ b/src/gc/heap.h
@@ -0,0 +1,623 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_HEAP_H_
+#define ART_SRC_GC_HEAP_H_
+
+#include <iosfwd>
+#include <string>
+#include <vector>
+
+#include "atomic_integer.h"
+#include "base/timing_logger.h"
+#include "gc/accounting/atomic_stack.h"
+#include "gc/accounting/card_table.h"
+#include "gc/collector/gc_type.h"
+#include "globals.h"
+#include "gtest/gtest.h"
+#include "locks.h"
+#include "offsets.h"
+#include "safe_map.h"
+#include "thread_pool.h"
+
+namespace art {
+
+class ConditionVariable;
+class Mutex;
+class StackVisitor;
+class Thread;
+class TimingLogger;
+
+namespace mirror {
+ class Class;
+ class Object;
+} // namespace mirror
+
+namespace gc {
+namespace accounting {
+ class HeapBitmap;
+ class ModUnionTable;
+ class SpaceSetMap;
+} // namespace accounting
+
+namespace collector {
+ class GarbageCollector;
+ class MarkSweep;
+} // namespace collector
+
+namespace space {
+ class AllocSpace;
+ class DiscontinuousSpace;
+ class DlMallocSpace;
+ class ImageSpace;
+ class LargeObjectSpace;
+ class Space;
+ class SpaceTest;
+} // namespace space
+
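+// Ages cards: a dirty card becomes kCardDirty - 1 and any other value is cleared to 0. Used when
+// processing cards before a GC so that cards dirtied before the GC remain distinguishable.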
+class AgeCardVisitor {
+ public:
+ byte operator ()(byte card) const {
+ if (card == accounting::CardTable::kCardDirty) {
+ return card - 1;
+ } else {
+ return 0;
+ }
+ }
+};
+
+// What caused the GC?
+enum GcCause {
+ // GC triggered by a failed allocation. Thread doing allocation is blocked waiting for GC before
+ // retrying allocation.
+ kGcCauseForAlloc,
+ // A background GC trying to ensure there is free memory ahead of allocations.
+ kGcCauseBackground,
+ // An explicit System.gc() call.
+ kGcCauseExplicit,
+};
+std::ostream& operator<<(std::ostream& os, const GcCause& policy);
+
+// How we want to sanity check the heap's correctness.
+enum HeapVerificationMode {
+ kHeapVerificationNotPermitted, // Too early in runtime start-up for heap to be verified.
+ kNoHeapVerification, // Production default.
+ kVerifyAllFast, // Sanity check all heap accesses with quick(er) tests.
+ kVerifyAll // Sanity check all heap accesses.
+};
+const HeapVerificationMode kDesiredHeapVerification = kNoHeapVerification;
+
+class Heap {
+ public:
+ static const size_t kDefaultInitialSize = 2 * MB;
+ static const size_t kDefaultMaximumSize = 32 * MB;
+ static const size_t kDefaultMaxFree = 2 * MB;
+ static const size_t kDefaultMinFree = kDefaultMaxFree / 4;
+
+ // Default target utilization.
+ static const double kDefaultTargetUtilization;
+
+ // Used so that we don't overflow the allocation time atomic integer.
+ static const size_t kTimeAdjust = 1024;
+
+  // Create a heap with the requested sizes. The possibly empty
+  // original_image_file_name specifies an image space to load based on
+  // ImageWriter output.
+ explicit Heap(size_t initial_size, size_t growth_limit, size_t min_free,
+ size_t max_free, double target_utilization, size_t capacity,
+ const std::string& original_image_file_name, bool concurrent_gc);
+
+ ~Heap();
+
+ // Allocates and initializes storage for an object instance.
+ mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // The given reference is believed to be to an object in the Java heap, check the soundness of it.
+ void VerifyObjectImpl(const mirror::Object* o);
+ void VerifyObject(const mirror::Object* o) {
+ if (o != NULL && this != NULL && verify_object_mode_ > kNoHeapVerification) {
+ VerifyObjectImpl(o);
+ }
+ }
+
+ // Check sanity of all live references.
+ void VerifyHeap() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+ bool VerifyHeapReferences()
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ bool VerifyMissingCardMarks()
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock,
+ // and doesn't abort on error, allowing the caller to report more
+ // meaningful diagnostics.
+ bool IsHeapAddress(const mirror::Object* obj);
+
+ // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
+ // Requires the heap lock to be held.
+ bool IsLiveObjectLocked(const mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ // Initiates an explicit garbage collection.
+ void CollectGarbage(bool clear_soft_references) LOCKS_EXCLUDED(Locks::mutator_lock_);
+
+ // Does a concurrent GC, should only be called by the GC daemon thread
+ // through runtime.
+ void ConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
+
+ // Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
+ // The boolean decides whether to use IsAssignableFrom or == when comparing classes.
+ void CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
+ uint64_t* counts)
+ LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Implements JDWP RT_Instances.
+ void GetInstances(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
+ LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Implements JDWP OR_ReferringObjects.
+ void GetReferringObjects(mirror::Object* o, int32_t max_count, std::vector<mirror::Object*>& referring_objects)
+ LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
+ // implement dalvik.system.VMRuntime.clearGrowthLimit.
+ void ClearGrowthLimit();
+
+ // Target ideal heap utilization ratio, implements
+ // dalvik.system.VMRuntime.getTargetHeapUtilization.
+ double GetTargetHeapUtilization() const {
+ return target_utilization_;
+ }
+
+ // Set target ideal heap utilization ratio, implements
+ // dalvik.system.VMRuntime.setTargetHeapUtilization.
+ void SetTargetHeapUtilization(float target);
+
+ // For the alloc space, sets the maximum number of bytes that the heap is allowed to allocate
+ // from the system. Doesn't allow the space to exceed its growth limit.
+ void SetIdealFootprint(size_t max_allowed_footprint);
+
+  // Blocks the caller until the garbage collector becomes idle, returning the type of GC that
+  // was waited on (kGcTypeNone if no GC was running).
+ collector::GcType WaitForConcurrentGcToComplete(Thread* self) LOCKS_EXCLUDED(gc_complete_lock_);
+
+ const std::vector<space::ContinuousSpace*>& GetContinuousSpaces() const {
+ return continuous_spaces_;
+ }
+
+ const std::vector<space::DiscontinuousSpace*>& GetDiscontinuousSpaces() const {
+ return discontinuous_spaces_;
+ }
+
+ void SetReferenceOffsets(MemberOffset reference_referent_offset,
+ MemberOffset reference_queue_offset,
+ MemberOffset reference_queueNext_offset,
+ MemberOffset reference_pendingNext_offset,
+ MemberOffset finalizer_reference_zombie_offset);
+
+ mirror::Object* GetReferenceReferent(mirror::Object* reference);
+ void ClearReferenceReferent(mirror::Object* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Returns true if the reference object has not yet been enqueued.
+ bool IsEnqueuable(const mirror::Object* ref);
+ void EnqueueReference(mirror::Object* ref, mirror::Object** list) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void EnqueuePendingReference(mirror::Object* ref, mirror::Object** list)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Object* DequeuePendingReference(mirror::Object** list) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ MemberOffset GetReferencePendingNextOffset() {
+ DCHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
+ return reference_pendingNext_offset_;
+ }
+
+ MemberOffset GetFinalizerReferenceZombieOffset() {
+ DCHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
+ return finalizer_reference_zombie_offset_;
+ }
+
+ // Enable verification of object references when the runtime is sufficiently initialized.
+ void EnableObjectValidation() {
+ verify_object_mode_ = kDesiredHeapVerification;
+ if (verify_object_mode_ > kNoHeapVerification) {
+ VerifyHeap();
+ }
+ }
+
+ // Disable object reference verification for image writing.
+ void DisableObjectValidation() {
+ verify_object_mode_ = kHeapVerificationNotPermitted;
+ }
+
+ // Other checks may be performed if we know the heap should be in a sane state.
+ bool IsObjectValidationEnabled() const {
+ return kDesiredHeapVerification > kNoHeapVerification &&
+ verify_object_mode_ > kHeapVerificationNotPermitted;
+ }
+
+ void RecordFree(size_t freed_objects, size_t freed_bytes);
+
+ // Must be called if a field of an Object in the heap changes, and before any GC safe-point.
+ // The call is not needed if NULL is stored in the field.
+ void WriteBarrierField(const mirror::Object* dst, MemberOffset /*offset*/, const mirror::Object* /*new_value*/) {
+ card_table_->MarkCard(dst);
+ }
+
+ // Write barrier for array operations that update many field positions
+ void WriteBarrierArray(const mirror::Object* dst, int /*start_offset*/,
+ size_t /*length TODO: element_count or byte_count?*/) {
+ card_table_->MarkCard(dst);
+ }
+
+ accounting::CardTable* GetCardTable() const {
+ return card_table_.get();
+ }
+
+ void AddFinalizerReference(Thread* self, mirror::Object* object);
+
+ // Returns the number of bytes currently allocated.
+ size_t GetBytesAllocated() const {
+ return num_bytes_allocated_;
+ }
+
+ // Returns the number of objects currently allocated.
+ size_t GetObjectsAllocated() const;
+
+ // Returns the total number of objects allocated since the heap was created.
+ size_t GetObjectsAllocatedEver() const;
+
+ // Returns the total number of bytes allocated since the heap was created.
+ size_t GetBytesAllocatedEver() const;
+
+ // Returns the total number of objects freed since the heap was created.
+ size_t GetObjectsFreedEver() const {
+ return total_objects_freed_ever_;
+ }
+
+ // Returns the total number of bytes freed since the heap was created.
+ size_t GetBytesFreedEver() const {
+ return total_bytes_freed_ever_;
+ }
+
+ // Implements java.lang.Runtime.maxMemory, returning the maximum amount of memory a program can
+ // consume. For a regular VM this would relate to the -Xmx option and would return -1 if no Xmx
+ // were specified. Android apps start with a growth limit (small heap size) which is
+ // cleared/extended for large apps.
+ int64_t GetMaxMemory() const {
+ return growth_limit_;
+ }
+
+ // Implements java.lang.Runtime.totalMemory, returning the amount of memory consumed by an
+ // application.
+ int64_t GetTotalMemory() const {
+    // TODO: we use the footprint limit here, which is conservative with respect to the number of
+    // pages actually used. We could implement a more accurate count across all spaces.
+ return max_allowed_footprint_;
+ }
+
+ // Implements java.lang.Runtime.freeMemory.
+ int64_t GetFreeMemory() const {
+ return GetTotalMemory() - num_bytes_allocated_;
+ }
+
+ // Get the space that corresponds to an object's address. Current implementation searches all
+ // spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
+ // TODO: consider using faster data structure like binary tree.
+ space::ContinuousSpace* FindContinuousSpaceFromObject(const mirror::Object*, bool fail_ok) const;
+ space::DiscontinuousSpace* FindDiscontinuousSpaceFromObject(const mirror::Object*,
+ bool fail_ok) const;
+ space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const;
+
+ void DumpForSigQuit(std::ostream& os);
+
+ size_t Trim();
+
+ accounting::HeapBitmap* GetLiveBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ return live_bitmap_.get();
+ }
+
+ accounting::HeapBitmap* GetMarkBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ return mark_bitmap_.get();
+ }
+
+ accounting::ObjectStack* GetLiveStack() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ return live_stack_.get();
+ }
+
+ void PreZygoteFork() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+
+  // Mark all objects in the allocation stack and then empty it.
+ void FlushAllocStack()
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ // Mark all the objects in the allocation stack in the specified bitmap.
+ void MarkAllocStack(accounting::SpaceBitmap* bitmap, accounting::SpaceSetMap* large_objects,
+ accounting::ObjectStack* stack)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ // Unmark all the objects in the allocation stack in the specified bitmap.
+ void UnMarkAllocStack(accounting::SpaceBitmap* bitmap, accounting::SpaceSetMap* large_objects,
+ accounting::ObjectStack* stack)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ // Update and mark mod union table based on gc type.
+ void UpdateAndMarkModUnion(collector::MarkSweep* mark_sweep, base::NewTimingLogger& timings,
+ collector::GcType gc_type)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ // DEPRECATED: Should remove in "near" future when support for multiple image spaces is added.
+ // Assumes there is only one image space.
+ space::ImageSpace* GetImageSpace() const;
+
+ space::DlMallocSpace* GetAllocSpace() const {
+ return alloc_space_;
+ }
+
+ space::LargeObjectSpace* GetLargeObjectsSpace() const {
+ return large_object_space_;
+ }
+
+ void DumpSpaces();
+
+ // UnReserve the address range where the oat file will be placed.
+ void UnReserveOatFileAddressRange();
+
+ // GC performance measuring
+ void DumpGcPerformanceInfo(std::ostream& os);
+
+ // Thread pool.
+ void CreateThreadPool();
+ void DeleteThreadPool();
+ ThreadPool* GetThreadPool() {
+ return thread_pool_.get();
+ }
+
+ private:
+ // Allocates uninitialized storage. Passing in a null space tries to place the object in the
+ // large object space.
+ mirror::Object* Allocate(Thread* self, space::AllocSpace* space, size_t num_bytes)
+ LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Try to allocate a number of bytes, this function never does any GCs.
+ mirror::Object* TryToAllocate(Thread* self, space::AllocSpace* space, size_t alloc_size, bool grow)
+ LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Pushes a list of cleared references out to the managed heap.
+ void EnqueueClearedReferences(mirror::Object** cleared_references);
+
+ void RequestHeapTrim() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
+ void RequestConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
+
+ void RecordAllocation(size_t size, mirror::Object* object)
+      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Sometimes CollectGarbageInternal decides to run a different GC than you requested. Returns
+  // which type of GC was actually run.
+ collector::GcType CollectGarbageInternal(collector::GcType gc_plan, GcCause gc_cause,
+ bool clear_soft_references)
+ LOCKS_EXCLUDED(gc_complete_lock_,
+ Locks::heap_bitmap_lock_,
+ Locks::thread_suspend_count_lock_);
+
+ void PreGcVerification(collector::GarbageCollector* gc);
+ void PreSweepingGcVerification(collector::GarbageCollector* gc)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void PostGcVerification(collector::GarbageCollector* gc);
+
+ // Given the current contents of the alloc space, increase the allowed heap footprint to match
+ // the target utilization ratio. This should only be called immediately after a full garbage
+ // collection.
+ void GrowForUtilization(uint64_t gc_duration);
+
+ size_t GetPercentFree();
+
+ void AddContinuousSpace(space::ContinuousSpace* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+ void AddDiscontinuousSpace(space::DiscontinuousSpace* space)
+ LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+
+  // No thread safety analysis since we call this everywhere and it is impossible to find a proper
+ // lock ordering for it.
+ void VerifyObjectBody(const mirror::Object *obj) NO_THREAD_SAFETY_ANALYSIS;
+
+ static void VerificationCallback(mirror::Object* obj, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ // Swap the allocation stack with the live stack.
+ void SwapStacks();
+
+ // Clear cards and update the mod union table.
+ void ProcessCards(base::NewTimingLogger& timings);
+
+ // All-known continuous spaces, where objects lie within fixed bounds.
+ std::vector<space::ContinuousSpace*> continuous_spaces_;
+
+ // All-known discontinuous spaces, where objects may be placed throughout virtual memory.
+ std::vector<space::DiscontinuousSpace*> discontinuous_spaces_;
+
+ // The allocation space we are currently allocating into.
+ space::DlMallocSpace* alloc_space_;
+
+ // The large object space we are currently allocating into.
+ space::LargeObjectSpace* large_object_space_;
+
+ // The card table, dirtied by the write barrier.
+ UniquePtr<accounting::CardTable> card_table_;
+
+ // The mod-union table remembers all of the references from the image space to the alloc /
+ // zygote spaces to allow the card table to be cleared.
+ UniquePtr<accounting::ModUnionTable> image_mod_union_table_;
+
+ // This table holds all of the references from the zygote space to the alloc space.
+ UniquePtr<accounting::ModUnionTable> zygote_mod_union_table_;
+
+ // What kind of concurrency behavior is the runtime after? True for concurrent mark sweep GC,
+ // false for stop-the-world mark sweep.
+ const bool concurrent_gc_;
+
+ // If we have a zygote space.
+ bool have_zygote_space_;
+
+ // Guards access to the state of GC, associated conditional variable is used to signal when a GC
+ // completes.
+ Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ UniquePtr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);
+
+ // Mutex held when adding references to reference queues.
+ // TODO: move to a UniquePtr, currently annotalysis is confused that UniquePtr isn't lockable.
+ Mutex* reference_queue_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+
+ // True while the garbage collector is running.
+ volatile bool is_gc_running_ GUARDED_BY(gc_complete_lock_);
+
+ // Last Gc type we ran. Used by WaitForConcurrentGc to know which Gc was waited on.
+ volatile collector::GcType last_gc_type_ GUARDED_BY(gc_complete_lock_);
+
+ // Maximum size that the heap can reach.
+ const size_t capacity_;
+ // The size the heap is limited to. This is initially smaller than capacity, but for largeHeap
+ // programs it is "cleared" making it the same as capacity.
+ size_t growth_limit_;
+  // When the number of bytes allocated exceeds this footprint, TryToAllocate returns NULL,
+  // indicating that a GC should be triggered.
+ size_t max_allowed_footprint_;
+
+ // When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
+ // it completes ahead of an allocation failing.
+ size_t concurrent_start_bytes_;
+
+ // Number of back-to-back sticky mark sweep collections.
+ size_t sticky_gc_count_;
+
+ // After how many sticky GCs we force to do a partial GC instead of sticky mark bits GC.
+ const size_t sticky_to_partial_gc_ratio_;
+
+ // Since the heap was created, how many bytes have been freed.
+ size_t total_bytes_freed_ever_;
+
+ // Since the heap was created, how many objects have been freed.
+ size_t total_objects_freed_ever_;
+
+ // Primitive objects larger than this size are put in the large object space.
+ const size_t large_object_threshold_;
+
+ // Number of bytes allocated. Adjusted after each allocation and free.
+ AtomicInteger num_bytes_allocated_;
+
+ // Heap verification flags.
+ const bool verify_missing_card_marks_;
+ const bool verify_system_weaks_;
+ const bool verify_pre_gc_heap_;
+ const bool verify_post_gc_heap_;
+ const bool verify_mod_union_table_;
+
+ // Parallel GC data structures.
+ UniquePtr<ThreadPool> thread_pool_;
+
+  // Sticky mark bits GC has some overhead, so if we have less than a few megabytes of AllocSpace
+  // then it's probably better to just do a partial GC.
+ const size_t min_alloc_space_size_for_sticky_gc_;
+
+ // Minimum remaining size for sticky GC. Since sticky GC doesn't free up as much memory as a
+ // normal GC, it is important to not use it when we are almost out of memory.
+ const size_t min_remaining_space_for_sticky_gc_;
+
+ // The last time a heap trim occurred.
+ uint64_t last_trim_time_ms_;
+
+ // The nanosecond time at which the last GC ended.
+ uint64_t last_gc_time_ns_;
+
+ // How many bytes were allocated at the end of the last GC.
+ uint64_t last_gc_size_;
+
+ // Estimated allocation rate (bytes / second). Computed between the time of the last GC cycle
+ // and the start of the current one.
+ uint64_t allocation_rate_;
+
+  // Heap bitmaps: live_bitmap_ tracks objects known to be live, while mark_bitmap_ tracks objects
+  // marked by the current GC cycle.
+ UniquePtr<accounting::HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
+ UniquePtr<accounting::HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
+
+ // Mark stack that we reuse to avoid re-allocating the mark stack.
+ UniquePtr<accounting::ObjectStack> mark_stack_;
+
+ // Allocation stack, new allocations go here so that we can do sticky mark bits. This enables us
+ // to use the live bitmap as the old mark bitmap.
+ const size_t max_allocation_stack_size_;
+ bool is_allocation_stack_sorted_;
+ UniquePtr<accounting::ObjectStack> allocation_stack_;
+
+ // Second allocation stack so that we can process allocation with the heap unlocked.
+ UniquePtr<accounting::ObjectStack> live_stack_;
+
+ // offset of java.lang.ref.Reference.referent
+ MemberOffset reference_referent_offset_;
+
+ // offset of java.lang.ref.Reference.queue
+ MemberOffset reference_queue_offset_;
+
+ // offset of java.lang.ref.Reference.queueNext
+ MemberOffset reference_queueNext_offset_;
+
+ // offset of java.lang.ref.Reference.pendingNext
+ MemberOffset reference_pendingNext_offset_;
+
+ // offset of java.lang.ref.FinalizerReference.zombie
+ MemberOffset finalizer_reference_zombie_offset_;
+
+ // Minimum free guarantees that you always have at least min_free_ free bytes after growing for
+ // utilization, regardless of target utilization ratio.
+ size_t min_free_;
+
+ // The ideal maximum free size, when we grow the heap for utilization.
+ size_t max_free_;
+
+ // Target ideal heap utilization ratio
+ double target_utilization_;
+
+ // Total time which mutators are paused or waiting for GC to complete.
+ uint64_t total_wait_time_;
+
+  // Whether to measure allocation time, and the total time spent allocating in microseconds.
+ const bool measure_allocation_time_;
+ AtomicInteger total_allocation_time_;
+
+ // The current state of heap verification, may be enabled or disabled.
+ HeapVerificationMode verify_object_mode_;
+
+ std::vector<collector::MarkSweep*> mark_sweep_collectors_;
+
+ // A map that we use to temporarily reserve address range for the oat file.
+ UniquePtr<MemMap> oat_file_map_;
+
+ friend class collector::MarkSweep;
+ friend class VerifyReferenceCardVisitor;
+ friend class VerifyReferenceVisitor;
+ friend class VerifyObjectVisitor;
+ friend class ScopedHeapLock;
+ friend class space::SpaceTest;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
+};
+
+} // namespace gc
+} // namespace art
+
+#endif // ART_SRC_GC_HEAP_H_
diff --git a/src/gc/heap_bitmap-inl.h b/src/gc/heap_bitmap-inl.h
deleted file mode 100644
index 2811183..0000000
--- a/src/gc/heap_bitmap-inl.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_SRC_GC_HEAP_BITMAP_INL_H_
-#define ART_SRC_GC_HEAP_BITMAP_INL_H_
-
-#include "heap_bitmap.h"
-
-namespace art {
-
-template <typename Visitor>
-inline void HeapBitmap::Visit(const Visitor& visitor) {
- // TODO: C++0x auto
- for (Bitmaps::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) {
- SpaceBitmap* bitmap = *it;
- bitmap->VisitMarkedRange(bitmap->HeapBegin(), bitmap->HeapLimit(), visitor, VoidFunctor());
- }
- large_objects_->Visit(visitor);
-}
-
-} // namespace art
-
-#endif // ART_SRC_GC_HEAP_BITMAP_INL_H_
diff --git a/src/gc/heap_bitmap.cc b/src/gc/heap_bitmap.cc
deleted file mode 100644
index cef6884..0000000
--- a/src/gc/heap_bitmap.cc
+++ /dev/null
@@ -1,49 +0,0 @@
-#include "heap_bitmap.h"
-#include "space.h"
-
-namespace art {
-
-void HeapBitmap::ReplaceBitmap(SpaceBitmap* old_bitmap, SpaceBitmap* new_bitmap) {
- // TODO: C++0x auto
- for (Bitmaps::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) {
- if (*it == old_bitmap) {
- *it = new_bitmap;
- return;
- }
- }
- LOG(FATAL) << "bitmap " << static_cast<const void*>(old_bitmap) << " not found";
-}
-
-void HeapBitmap::AddSpaceBitmap(SpaceBitmap* bitmap) {
- DCHECK(bitmap != NULL);
-
- // Check for interval overlap.
- for (Bitmaps::const_iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) {
- SpaceBitmap* cur_bitmap = *it;
- if (bitmap->HeapBegin() < cur_bitmap->HeapLimit() &&
- bitmap->HeapLimit() > cur_bitmap->HeapBegin()) {
- LOG(FATAL) << "Overlapping space bitmaps added to heap bitmap!";
- }
- }
- bitmaps_.push_back(bitmap);
-}
-
-void HeapBitmap::SetLargeObjects(SpaceSetMap* large_objects) {
- DCHECK(large_objects != NULL);
- large_objects_ = large_objects;
-}
-
-HeapBitmap::HeapBitmap(Heap* heap) : heap_(heap), large_objects_(NULL) {
-
-}
-
-void HeapBitmap::Walk(SpaceBitmap::Callback* callback, void* arg) {
- // TODO: C++0x auto
- for (Bitmaps::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) {
- (*it)->Walk(callback, arg);
- }
-
- large_objects_->Walk(callback, arg);
-}
-
-} // namespace art
diff --git a/src/gc/heap_bitmap.h b/src/gc/heap_bitmap.h
deleted file mode 100644
index 87e0848..0000000
--- a/src/gc/heap_bitmap.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_SRC_GC_HEAP_BITMAP_H_
-#define ART_SRC_GC_HEAP_BITMAP_H_
-
-#include "locks.h"
-#include "space_bitmap.h"
-
-namespace art {
-class Heap;
-
-class HeapBitmap {
- public:
- bool Test(const mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
- SpaceBitmap* bitmap = GetSpaceBitmap(obj);
- if (LIKELY(bitmap != NULL)) {
- return bitmap->Test(obj);
- } else {
- return large_objects_->Test(obj);
- }
- }
-
- void Clear(const mirror::Object* obj)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
- SpaceBitmap* bitmap = GetSpaceBitmap(obj);
- if (LIKELY(bitmap != NULL)) {
- bitmap->Clear(obj);
- } else {
- large_objects_->Clear(obj);
- }
- }
-
- void Set(const mirror::Object* obj)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
- SpaceBitmap* bitmap = GetSpaceBitmap(obj);
- if (LIKELY(bitmap != NULL)) {
- bitmap->Set(obj);
- } else {
- large_objects_->Set(obj);
- }
- }
-
- SpaceBitmap* GetSpaceBitmap(const mirror::Object* obj) {
- // TODO: C++0x auto
- for (Bitmaps::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) {
- if ((*it)->HasAddress(obj)) {
- return *it;
- }
- }
- return NULL;
- }
-
- void Walk(SpaceBitmap::Callback* callback, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
- template <typename Visitor>
- void Visit(const Visitor& visitor)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- // Find and replace a bitmap pointer, this is used by for the bitmap swapping in the GC.
- void ReplaceBitmap(SpaceBitmap* old_bitmap, SpaceBitmap* new_bitmap)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
- HeapBitmap(Heap* heap);
-
- inline SpaceSetMap* GetLargeObjects() const {
- return large_objects_;
- }
-
- void SetLargeObjects(SpaceSetMap* large_objects);
-
- private:
-
- const Heap* const heap_;
-
- void AddSpaceBitmap(SpaceBitmap* bitmap);
-
- typedef std::vector<SpaceBitmap*> Bitmaps;
- Bitmaps bitmaps_;
-
- // Large object sets.
- SpaceSetMap* large_objects_;
-
- friend class Heap;
-};
-
-} // namespace art
-
-#endif // ART_SRC_GC_HEAP_BITMAP_H_
diff --git a/src/gc/heap_test.cc b/src/gc/heap_test.cc
new file mode 100644
index 0000000..02708e8
--- /dev/null
+++ b/src/gc/heap_test.cc
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common_test.h"
+#include "gc/accounting/card_table-inl.h"
+#include "gc/accounting/space_bitmap-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
+#include "sirt_ref.h"
+
+namespace art {
+namespace gc {
+
+class HeapTest : public CommonTest {};
+
+TEST_F(HeapTest, ClearGrowthLimit) {
+ Heap* heap = Runtime::Current()->GetHeap();
+ int64_t max_memory_before = heap->GetMaxMemory();
+ int64_t total_memory_before = heap->GetTotalMemory();
+ heap->ClearGrowthLimit();
+ int64_t max_memory_after = heap->GetMaxMemory();
+ int64_t total_memory_after = heap->GetTotalMemory();
+ EXPECT_GE(max_memory_after, max_memory_before);
+ EXPECT_GE(total_memory_after, total_memory_before);
+}
+
+TEST_F(HeapTest, GarbageCollectClassLinkerInit) {
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ // garbage is created during ClassLinker::Init
+
+ mirror::Class* c = class_linker_->FindSystemClass("[Ljava/lang/Object;");
+ for (size_t i = 0; i < 1024; ++i) {
+ SirtRef<mirror::ObjectArray<mirror::Object> > array(soa.Self(),
+ mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), c, 2048));
+ for (size_t j = 0; j < 2048; ++j) {
+ array->Set(j, mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!"));
+ }
+ }
+ }
+ Runtime::Current()->GetHeap()->CollectGarbage(false);
+}
+
+TEST_F(HeapTest, HeapBitmapCapacityTest) {
+ byte* heap_begin = reinterpret_cast<byte*>(0x1000);
+ const size_t heap_capacity = accounting::SpaceBitmap::kAlignment * (sizeof(intptr_t) * 8 + 1);
+ UniquePtr<accounting::SpaceBitmap> bitmap(accounting::SpaceBitmap::Create("test bitmap",
+ heap_begin,
+ heap_capacity));
+ mirror::Object* fake_end_of_heap_object =
+ reinterpret_cast<mirror::Object*>(&heap_begin[heap_capacity -
+ accounting::SpaceBitmap::kAlignment]);
+ bitmap->Set(fake_end_of_heap_object);
+}
+
+} // namespace gc
+} // namespace art
diff --git a/src/gc/mod_union_table-inl.h b/src/gc/mod_union_table-inl.h
deleted file mode 100644
index c1c69fb..0000000
--- a/src/gc/mod_union_table-inl.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_SRC_GC_MOD_UNION_TABLE_INL_H_
-#define ART_SRC_GC_MOD_UNION_TABLE_INL_H_
-
-#include "mod_union_table.h"
-
-namespace art {
-
-template <typename Implementation>
-class ModUnionTableToZygoteAllocspace : public Implementation {
-public:
- ModUnionTableToZygoteAllocspace(Heap* heap) : Implementation(heap) {
- }
-
- bool AddReference(const mirror::Object* /* obj */, const mirror::Object* ref) {
- const Spaces& spaces = Implementation::GetHeap()->GetSpaces();
- for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
- if ((*it)->Contains(ref)) {
- return (*it)->IsAllocSpace();
- }
- }
- // Assume it points to a large object.
- // TODO: Check.
- return true;
- }
-};
-
-template <typename Implementation>
-class ModUnionTableToAllocspace : public Implementation {
-public:
- ModUnionTableToAllocspace(Heap* heap) : Implementation(heap) {
- }
-
- bool AddReference(const mirror::Object* /* obj */, const mirror::Object* ref) {
- const Spaces& spaces = Implementation::GetHeap()->GetSpaces();
- for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
- if ((*it)->Contains(ref)) {
- return (*it)->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect;
- }
- }
- // Assume it points to a large object.
- // TODO: Check.
- return true;
- }
-};
-
-} // namespace art
-
-#endif // ART_SRC_GC_MOD_UNION_TABLE_INL_H_
diff --git a/src/gc/mod_union_table.cc b/src/gc/mod_union_table.cc
deleted file mode 100644
index da950bb..0000000
--- a/src/gc/mod_union_table.cc
+++ /dev/null
@@ -1,424 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "mod_union_table.h"
-
-#include "base/stl_util.h"
-#include "card_table-inl.h"
-#include "heap.h"
-#include "heap_bitmap.h"
-#include "mark_sweep.h"
-#include "mark_sweep-inl.h"
-#include "mirror/object-inl.h"
-#include "mirror/class-inl.h"
-#include "mirror/field-inl.h"
-#include "mirror/object_array-inl.h"
-#include "space.h"
-#include "space_bitmap-inl.h"
-#include "thread.h"
-#include "UniquePtr.h"
-
-using namespace art::mirror;
-
-namespace art {
-
-class MarkIfReachesAllocspaceVisitor {
- public:
- explicit MarkIfReachesAllocspaceVisitor(Heap* const heap, SpaceBitmap* bitmap)
- : heap_(heap),
- bitmap_(bitmap) {
- }
-
- // Extra parameters are required since we use this same visitor signature for checking objects.
- void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */, bool /* is_static */) const {
- // TODO: Optimize?
- // TODO: C++0x auto
- const Spaces& spaces = heap_->GetSpaces();
- for (Spaces::const_iterator cur = spaces.begin(); cur != spaces.end(); ++cur) {
- if ((*cur)->IsAllocSpace() && (*cur)->Contains(ref)) {
- bitmap_->Set(obj);
- break;
- }
- }
- }
-
- private:
- Heap* const heap_;
- SpaceBitmap* bitmap_;
-};
-
-class ModUnionVisitor {
- public:
- explicit ModUnionVisitor(Heap* const heap, SpaceBitmap* bitmap)
- : heap_(heap),
- bitmap_(bitmap) {
- }
-
- void operator ()(const Object* obj) const
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
- Locks::mutator_lock_) {
- DCHECK(obj != NULL);
- // We don't have an early exit since we use the visitor pattern, an early exit should
- // significantly speed this up.
- MarkIfReachesAllocspaceVisitor visitor(heap_, bitmap_);
- MarkSweep::VisitObjectReferences(obj, visitor);
- }
- private:
- Heap* const heap_;
- SpaceBitmap* bitmap_;
-};
-
-class ModUnionClearCardSetVisitor {
- public:
- explicit ModUnionClearCardSetVisitor(std::set<byte*>* const cleared_cards)
- : cleared_cards_(cleared_cards) {
- }
-
- inline void operator ()(byte* card, byte expected_value, byte new_value) const {
- if (expected_value == CardTable::kCardDirty) {
- cleared_cards_->insert(card);
- }
- }
-
- private:
- std::set<byte*>* const cleared_cards_;
-};
-
-class ModUnionClearCardVisitor {
- public:
- explicit ModUnionClearCardVisitor(std::vector<byte*>* cleared_cards)
- : cleared_cards_(cleared_cards) {
- }
-
- void operator ()(byte* card, byte expected_card, byte new_card) const {
- if (expected_card == CardTable::kCardDirty) {
- cleared_cards_->push_back(card);
- }
- }
- private:
- std::vector<byte*>* cleared_cards_;
-};
-
-ModUnionTableBitmap::ModUnionTableBitmap(Heap* heap) : ModUnionTable(heap) {
- // Prevent fragmentation of the heap which is caused by resizing of the vector.
- // TODO: Make a new vector which uses madvise (basically same as a mark stack).
- cleared_cards_.reserve(32);
- const Spaces& spaces = heap->GetSpaces();
- // Create one heap bitmap per image space.
- // TODO: C++0x auto
- for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
- ContinuousSpace* space = *it;
- if (space->IsImageSpace()) {
- // The mod-union table is only needed when we have an image space since it's purpose is to
- // cache image roots.
- UniquePtr<SpaceBitmap> bitmap(SpaceBitmap::Create("mod-union table bitmap", space->Begin(),
- space->Size()));
- CHECK(bitmap.get() != NULL) << "Failed to create mod-union bitmap";
- bitmaps_.Put(space, bitmap.release());
- }
- }
-}
-
-ModUnionTableBitmap::~ModUnionTableBitmap() {
- STLDeleteValues(&bitmaps_);
-}
-
-void ModUnionTableBitmap::ClearCards(ContinuousSpace* space) {
- CardTable* card_table = heap_->GetCardTable();
- ModUnionClearCardVisitor visitor(&cleared_cards_);
-  // Clear dirty cards in this image space and update the corresponding mod-union bits.
- card_table->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), visitor);
-}
-
-void ModUnionTableBitmap::Update() {
- CardTable* card_table = heap_->GetCardTable();
- while (!cleared_cards_.empty()) {
- byte* card = cleared_cards_.back();
- cleared_cards_.pop_back();
-
- uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
- uintptr_t end = start + CardTable::kCardSize;
- ContinuousSpace* space = heap_->FindSpaceFromObject(reinterpret_cast<Object*>(start));
- SpaceBitmap* bitmap = space->GetLiveBitmap();
-
- // Clear the mod-union bitmap range corresponding to this card so that we don't have any
- // objects marked which do not reach the alloc space.
- bitmap->VisitRange(start, end, SpaceBitmap::ClearVisitor(bitmap));
-
- // At this point we need to update the mod-union bitmap to contain all the objects which reach
- // the alloc space.
- ModUnionVisitor visitor(heap_, bitmap);
- space->GetLiveBitmap()->VisitMarkedRange(start, end, visitor, VoidFunctor());
- }
-}
-
-class ModUnionScanImageRootVisitor {
- public:
- ModUnionScanImageRootVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {
- }
-
- void operator ()(const Object* root) const
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(root != NULL);
- mark_sweep_->ScanRoot(root);
- }
-
- private:
- MarkSweep* const mark_sweep_;
-};
-
-void ModUnionTableBitmap::MarkReferences(MarkSweep* mark_sweep) {
- // Some tests have no image space, and therefore no mod-union bitmap.
- ModUnionScanImageRootVisitor image_root_scanner(mark_sweep);
- for (BitmapMap::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) {
- const ContinuousSpace* space = it->first;
- uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
- uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
- it->second->VisitMarkedRange(begin, end, image_root_scanner, VoidFunctor());
- }
-}
-
-
-ModUnionTableReferenceCache::ModUnionTableReferenceCache(Heap* heap) : ModUnionTable(heap) {
-
-}
-
-ModUnionTableReferenceCache::~ModUnionTableReferenceCache() {
-
-}
-
-void ModUnionTableReferenceCache::ClearCards(ContinuousSpace* space) {
- CardTable* card_table = GetHeap()->GetCardTable();
- ModUnionClearCardSetVisitor visitor(&cleared_cards_);
-  // Clear dirty cards in this space and update the corresponding mod-union bits.
- card_table->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), visitor);
-}
-
-class AddToReferenceArrayVisitor {
- public:
- explicit AddToReferenceArrayVisitor(
- ModUnionTableReferenceCache* const mod_union_table,
- ModUnionTableReferenceCache::ReferenceArray* references)
- : mod_union_table_(mod_union_table),
- references_(references) {
- }
-
- // Extra parameters are required since we use this same visitor signature for checking objects.
- void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
- bool /* is_static */) const {
-    // Only add the reference if it is non-null and fits our criteria.
- if (ref != NULL && mod_union_table_->AddReference(obj, ref)) {
- references_->push_back(ref);
- }
- }
-
- private:
- ModUnionTableReferenceCache* mod_union_table_;
- ModUnionTable::ReferenceArray* references_;
-};
-
-class ModUnionReferenceVisitor {
- public:
- explicit ModUnionReferenceVisitor(
- ModUnionTableReferenceCache* const mod_union_table,
- ModUnionTableReferenceCache::ReferenceArray* references)
- : mod_union_table_(mod_union_table),
- references_(references) {
- }
-
- void operator ()(const Object* obj) const
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
- Locks::mutator_lock_) {
- DCHECK(obj != NULL);
-    // We don't have an early exit since we use the visitor pattern; an early
-    // exit would significantly speed this up.
- AddToReferenceArrayVisitor visitor(mod_union_table_, references_);
- MarkSweep::VisitObjectReferences(obj, visitor);
- }
- private:
- ModUnionTableReferenceCache* const mod_union_table_;
- ModUnionTable::ReferenceArray* references_;
-};
-
-
-class CheckReferenceVisitor {
- public:
- typedef std::set<const Object*> ReferenceSet;
-
- explicit CheckReferenceVisitor(
- ModUnionTableReferenceCache* const mod_union_table,
- const ReferenceSet& references)
- : mod_union_table_(mod_union_table),
- references_(references) {
- }
-
- // Extra parameters are required since we use this same visitor signature for checking objects.
-  // TODO: Fix this when annotalysis works with visitors.
- void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
- bool /* is_static */) const
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
- Heap* heap = mod_union_table_->GetHeap();
- if (ref != NULL && mod_union_table_->AddReference(obj, ref) &&
- references_.find(ref) == references_.end()) {
- ContinuousSpace* from_space = heap->FindSpaceFromObject(obj);
- ContinuousSpace* to_space = heap->FindSpaceFromObject(ref);
- LOG(INFO) << "Object " << reinterpret_cast<const void*>(obj) << "(" << PrettyTypeOf(obj) << ")"
- << "References " << reinterpret_cast<const void*>(ref)
- << "(" << PrettyTypeOf(ref) << ") without being in mod-union table";
- LOG(INFO) << "FromSpace " << from_space->GetName() << " type " << from_space->GetGcRetentionPolicy();
- LOG(INFO) << "ToSpace " << to_space->GetName() << " type " << to_space->GetGcRetentionPolicy();
- mod_union_table_->GetHeap()->DumpSpaces();
- LOG(FATAL) << "FATAL ERROR";
- }
- }
-
- private:
- ModUnionTableReferenceCache* const mod_union_table_;
- const ReferenceSet& references_;
-};
-
-class ModUnionCheckReferences {
- public:
- typedef std::set<const Object*> ReferenceSet;
-
- explicit ModUnionCheckReferences (
- ModUnionTableReferenceCache* const mod_union_table,
- const ReferenceSet& references)
- : mod_union_table_(mod_union_table),
- references_(references) {
- }
-
- void operator ()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
- DCHECK(obj != NULL);
- if (kDebugLocking) {
- Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
- }
- CheckReferenceVisitor visitor(mod_union_table_, references_);
- MarkSweep::VisitObjectReferences(obj, visitor);
- }
-
- private:
- ModUnionTableReferenceCache* const mod_union_table_;
- const ReferenceSet& references_;
-};
-
-void ModUnionTableReferenceCache::Verify() {
- // Start by checking that everything in the mod union table is marked.
- Heap* heap = GetHeap();
- for (ReferenceMap::const_iterator it = references_.begin(); it != references_.end(); ++it) {
- for (ReferenceArray::const_iterator it_ref = it->second.begin(); it_ref != it->second.end();
- ++it_ref ) {
- DCHECK(heap->GetLiveBitmap()->Test(*it_ref));
- }
- }
-
- // Check the references of each clean card which is also in the mod union table.
- for (ReferenceMap::const_iterator it = references_.begin(); it != references_.end(); ++it) {
- const byte* card = &*it->first;
- if (*card == CardTable::kCardClean) {
- std::set<const Object*> reference_set;
- for (ReferenceArray::const_iterator itr = it->second.begin(); itr != it->second.end();++itr) {
- reference_set.insert(*itr);
- }
- ModUnionCheckReferences visitor(this, reference_set);
- CardTable* card_table = heap->GetCardTable();
- uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
- uintptr_t end = start + CardTable::kCardSize;
- SpaceBitmap* live_bitmap =
- heap->FindSpaceFromObject(reinterpret_cast<Object*>(start))->GetLiveBitmap();
- live_bitmap->VisitMarkedRange(start, end, visitor, VoidFunctor());
- }
- }
-}
-
-void ModUnionTableReferenceCache::Update() {
- Heap* heap = GetHeap();
- CardTable* card_table = heap->GetCardTable();
-
- ReferenceArray cards_references;
- ModUnionReferenceVisitor visitor(this, &cards_references);
-
- for (ClearedCards::iterator it = cleared_cards_.begin(); it != cleared_cards_.end(); ++it) {
- byte* card = *it;
- // Clear and re-compute alloc space references associated with this card.
- cards_references.clear();
- uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
- uintptr_t end = start + CardTable::kCardSize;
- SpaceBitmap* live_bitmap =
- heap->FindSpaceFromObject(reinterpret_cast<Object*>(start))->GetLiveBitmap();
- live_bitmap->VisitMarkedRange(start, end, visitor, VoidFunctor());
-
- // Update the corresponding references for the card.
- // TODO: C++0x auto
- ReferenceMap::iterator found = references_.find(card);
- if (found == references_.end()) {
- if (cards_references.empty()) {
-        // No reason to add an empty array.
- continue;
- }
- references_.Put(card, cards_references);
- } else {
- found->second = cards_references;
- }
- }
- cleared_cards_.clear();
-}
-
-void ModUnionTableReferenceCache::MarkReferences(MarkSweep* mark_sweep) {
- // TODO: C++0x auto
- size_t count = 0;
- for (ReferenceMap::const_iterator it = references_.begin(); it != references_.end(); ++it) {
- for (ReferenceArray::const_iterator it_ref = it->second.begin(); it_ref != it->second.end(); ++it_ref ) {
- mark_sweep->MarkRoot(*it_ref);
- ++count;
- }
- }
- if (VLOG_IS_ON(heap)) {
- VLOG(gc) << "Marked " << count << " references in mod union table";
- }
-}
-
-ModUnionTableCardCache::ModUnionTableCardCache(Heap* heap) : ModUnionTable(heap) {
-
-}
-
-ModUnionTableCardCache::~ModUnionTableCardCache() {
-
-}
-
-void ModUnionTableCardCache::ClearCards(ContinuousSpace* space) {
- CardTable* card_table = GetHeap()->GetCardTable();
- ModUnionClearCardSetVisitor visitor(&cleared_cards_);
- // Clear dirty cards in the this space and update the corresponding mod-union bits.
- card_table->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), visitor);
-}
-
-// Mark all references to the alloc space(s).
-void ModUnionTableCardCache::MarkReferences(MarkSweep* mark_sweep) {
- CardTable* card_table = heap_->GetCardTable();
- ModUnionScanImageRootVisitor visitor(mark_sweep);
- for (ClearedCards::const_iterator it = cleared_cards_.begin(); it != cleared_cards_.end(); ++it) {
- byte* card = *it;
- uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
- uintptr_t end = start + CardTable::kCardSize;
- SpaceBitmap* live_bitmap =
- heap_->FindSpaceFromObject(reinterpret_cast<Object*>(start))->GetLiveBitmap();
- live_bitmap->VisitMarkedRange(start, end, visitor, VoidFunctor());
- }
-}
-
-} // namespace art
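
The card-cache table above (ModUnionTableCardCache) deliberately stores no per-card references: it only remembers which cards were dirty and re-scans those card ranges as roots during the next collection. A minimal, self-contained sketch of that idea follows; the types and the kCardSize value are stand-ins for ART's CardTable/MarkSweep machinery, not the real API.

#include <cstddef>
#include <stdint.h>
#include <set>

// Sketch only: remember dirty cards, then replay each remembered card range to a scanner.
// The scanner callback stands in for ART's root-scanning visitor.
typedef void (*ScanRangeFn)(uintptr_t begin, uintptr_t end);

class SimpleCardCache {
 public:
  static const size_t kCardSize = 128;  // bytes of heap covered by one card (assumed value)

  // Card-clearing pass: any card observed dirty is remembered here.
  void RecordDirtyCard(uintptr_t card_begin) {
    cleared_cards_.insert(card_begin);
  }

  // Marking pass: every remembered card range is handed back as a root range.
  void MarkReferences(ScanRangeFn scan_range) const {
    for (std::set<uintptr_t>::const_iterator it = cleared_cards_.begin();
         it != cleared_cards_.end(); ++it) {
      scan_range(*it, *it + kCardSize);
    }
  }

 private:
  std::set<uintptr_t> cleared_cards_;  // one entry per dirty card, retained between GCs
};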
diff --git a/src/gc/mod_union_table.h b/src/gc/mod_union_table.h
deleted file mode 100644
index c0b9535..0000000
--- a/src/gc/mod_union_table.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_SRC_GC_MOD_UNION_TABLE_H_
-#define ART_SRC_GC_MOD_UNION_TABLE_H_
-
-#include "globals.h"
-#include "safe_map.h"
-
-#include <set>
-#include <vector>
-
-namespace art {
-namespace mirror {
-class Object;
-}
-class ContinuousSpace;
-class Heap;
-class HeapBitmap;
-class MarkSweep;
-class Space;
-class SpaceBitmap;
-
-// Base class
-class ModUnionTable {
- public:
- typedef std::vector<const mirror::Object*> ReferenceArray;
- typedef std::set<byte*> ClearedCards;
-
- ModUnionTable(Heap* heap) : heap_(heap) {
-
- }
-
- virtual ~ModUnionTable() {
-
- }
-
- // Clear cards which map to a memory range of a space.
- virtual void ClearCards(ContinuousSpace* space) = 0;
-
- // Update the mod-union table.
- virtual void Update() = 0;
-
- // Mark all references which are stored in the mod union table.
- virtual void MarkReferences(MarkSweep* mark_sweep) = 0;
-
-  // Verification: sanity checks that we don't have clean cards which conflict with our cached data
- // for said cards. Exclusive lock is required since verify sometimes uses
- // SpaceBitmap::VisitMarkedRange and VisitMarkedRange can't know if the callback will modify the
- // bitmap or not.
- virtual void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) = 0;
-
- Heap* GetHeap() const {
- return heap_;
- }
-
- protected:
- Heap* const heap_;
-};
-
-// Bitmap implementation.
-// DEPRECATED: performs strictly worse than merely caching which cards were dirty.
-class ModUnionTableBitmap : public ModUnionTable {
- public:
- ModUnionTableBitmap(Heap* heap);
- virtual ~ModUnionTableBitmap();
-
- // Clear space cards.
- void ClearCards(ContinuousSpace* space);
-
- // Update table based on cleared cards.
- void Update()
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- // Mark all references to the alloc space(s).
- void MarkReferences(MarkSweep* mark_sweep) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
- protected:
- // Cleared card array, used to update the mod-union table.
- std::vector<byte*> cleared_cards_;
-
- // One bitmap per image space.
- // TODO: Add support for Zygote spaces?
- typedef SafeMap<ContinuousSpace*, SpaceBitmap*> BitmapMap;
- BitmapMap bitmaps_;
-};
-
-// Reference caching implementation. Caches references pointing to alloc space(s) for each card.
-class ModUnionTableReferenceCache : public ModUnionTable {
- public:
- typedef SafeMap<const byte*, ReferenceArray > ReferenceMap;
-
- ModUnionTableReferenceCache(Heap* heap);
- virtual ~ModUnionTableReferenceCache();
-
- // Clear and store cards for a space.
- void ClearCards(ContinuousSpace* space);
-
- // Update table based on cleared cards.
- void Update()
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- // Mark all references to the alloc space(s).
- void MarkReferences(MarkSweep* mark_sweep)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
- // Exclusive lock is required since verify uses SpaceBitmap::VisitMarkedRange and
- // VisitMarkedRange can't know if the callback will modify the bitmap or not.
- void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
- // Function that tells whether or not to add a reference to the table.
- virtual bool AddReference(const mirror::Object* obj, const mirror::Object* ref) = 0;
-
- protected:
- // Cleared card array, used to update the mod-union table.
- ClearedCards cleared_cards_;
-
- // Maps from dirty cards to their corresponding alloc space references.
- ReferenceMap references_;
-};
-
-// Card caching implementation. Keeps track only of which cards were cleared.
-class ModUnionTableCardCache : public ModUnionTable {
- public:
- typedef SafeMap<const byte*, ReferenceArray > ReferenceMap;
-
- ModUnionTableCardCache(Heap* heap);
- virtual ~ModUnionTableCardCache();
-
- // Clear and store cards for a space.
- void ClearCards(ContinuousSpace* space);
-
- // Nothing to update.
- void Update() {}
-
- // Mark all references to the alloc space(s).
- void MarkReferences(MarkSweep* mark_sweep)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- // Nothing to verify.
- void Verify() {}
-
- protected:
- // Cleared card array, used to update the mod-union table.
- ClearedCards cleared_cards_;
-};
-
-} // namespace art
-
-#endif // ART_SRC_GC_MOD_UNION_TABLE_H_
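
For orientation, a mod-union table is driven in three steps per collection: clear and record cards for the image/zygote spaces, refresh the cached data, then feed the cached references to the marker as roots. The following is a rough driver sketch against the interface above; the ART types come from the deleted header and the real call sites live in the heap/collector code, so this is illustrative only.

// Rough driver sketch; not the real Heap/MarkSweep code.
void ProcessModUnionTable(ModUnionTable* table, ContinuousSpace* image_space,
                          MarkSweep* mark_sweep) {
  table->ClearCards(image_space);     // age dirty cards and remember them
  table->Update();                    // recompute cached references for remembered cards
  table->MarkReferences(mark_sweep);  // treat the cached references as GC roots
}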
diff --git a/src/gc/partial_mark_sweep.h b/src/gc/partial_mark_sweep.h
deleted file mode 100644
index 64c0bcd..0000000
--- a/src/gc/partial_mark_sweep.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_SRC_GC_PARTIAL_MARK_SWEEP_H_
-#define ART_SRC_GC_PARTIAL_MARK_SWEEP_H_
-
-#include "locks.h"
-#include "mark_sweep.h"
-
-namespace art {
-
-class PartialMarkSweep : public MarkSweep {
- public:
- virtual GcType GetGcType() const {
- return kGcTypePartial;
- }
-
- explicit PartialMarkSweep(Heap* heap, bool is_concurrent);
- ~PartialMarkSweep();
-
-protected:
- virtual void BindBitmaps()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- DISALLOW_COPY_AND_ASSIGN(PartialMarkSweep);
-};
-
-} // namespace art
-
-#endif // ART_SRC_GC_PARTIAL_MARK_SWEEP_H_
diff --git a/src/gc/space.h b/src/gc/space.h
deleted file mode 100644
index d2bcd53..0000000
--- a/src/gc/space.h
+++ /dev/null
@@ -1,463 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_SRC_GC_SPACE_H_
-#define ART_SRC_GC_SPACE_H_
-
-#include <string>
-
-#include "UniquePtr.h"
-#include "base/macros.h"
-#include "base/mutex.h"
-#include "globals.h"
-#include "image.h"
-#include "dlmalloc.h"
-#include "mem_map.h"
-
-namespace art {
-
-static const bool kDebugSpaces = kIsDebugBuild;
-
-namespace mirror {
-class Object;
-} // namespace mirror
-class DlMallocSpace;
-class ImageSpace;
-class LargeObjectSpace;
-class SpaceBitmap;
-
-enum GcRetentionPolicy {
- kGcRetentionPolicyNeverCollect,
- kGcRetentionPolicyAlwaysCollect,
- kGcRetentionPolicyFullCollect, // Collect only for full GC
-};
-std::ostream& operator<<(std::ostream& os, const GcRetentionPolicy& policy);
-
-enum SpaceType {
- kSpaceTypeImageSpace,
- kSpaceTypeAllocSpace,
- kSpaceTypeZygoteSpace,
- kSpaceTypeLargeObjectSpace,
-};
-std::ostream& operator<<(std::ostream& os, const SpaceType& space_type);
-
-// A space contains memory allocated for managed objects.
-class Space {
- public:
- virtual bool CanAllocateInto() const = 0;
- virtual bool IsCompactible() const = 0;
- virtual bool Contains(const mirror::Object* obj) const = 0;
- virtual SpaceType GetType() const = 0;
- virtual GcRetentionPolicy GetGcRetentionPolicy() const = 0;
- virtual std::string GetName() const = 0;
-
- ImageSpace* AsImageSpace();
- DlMallocSpace* AsAllocSpace();
- DlMallocSpace* AsZygoteSpace();
- LargeObjectSpace* AsLargeObjectSpace();
-
- bool IsImageSpace() const {
- return GetType() == kSpaceTypeImageSpace;
- }
-
- bool IsAllocSpace() const {
- return GetType() == kSpaceTypeAllocSpace || GetType() == kSpaceTypeZygoteSpace;
- }
-
- bool IsZygoteSpace() const {
- return GetType() == kSpaceTypeZygoteSpace;
- }
-
- bool IsLargeObjectSpace() const {
- return GetType() == kSpaceTypeLargeObjectSpace;
- }
-
- virtual void Dump(std::ostream& /* os */) const { }
-
- virtual ~Space() {}
-
- protected:
- Space() { }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(Space);
-};
-
-// AllocSpace interface.
-class AllocSpace {
- public:
- virtual bool CanAllocateInto() const {
- return true;
- }
-
- // General statistics
- virtual uint64_t GetNumBytesAllocated() const = 0;
- virtual uint64_t GetNumObjectsAllocated() const = 0;
- virtual uint64_t GetTotalBytesAllocated() const = 0;
- virtual uint64_t GetTotalObjectsAllocated() const = 0;
-
- // Allocate num_bytes without allowing growth.
- virtual mirror::Object* Alloc(Thread* self, size_t num_bytes) = 0;
-
- // Return the storage space required by obj.
- virtual size_t AllocationSize(const mirror::Object* obj) = 0;
-
- // Returns how many bytes were freed.
- virtual size_t Free(Thread* self, mirror::Object* ptr) = 0;
-
- // Returns how many bytes were freed.
- virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) = 0;
-
- protected:
- AllocSpace() {}
- virtual ~AllocSpace() {}
-
- private:
- DISALLOW_COPY_AND_ASSIGN(AllocSpace);
-};
-
-// Continuous spaces have bitmaps, and an address range.
-class ContinuousSpace : public Space {
- public:
- // Address at which the space begins
- byte* Begin() const {
- return begin_;
- }
-
- // Address at which the space ends, which may vary as the space is filled.
- byte* End() const {
- return end_;
- }
-
- // Current size of space
- size_t Size() const {
- return End() - Begin();
- }
-
- virtual SpaceBitmap* GetLiveBitmap() const = 0;
- virtual SpaceBitmap* GetMarkBitmap() const = 0;
-
- // Is object within this space?
- bool HasAddress(const mirror::Object* obj) const {
- const byte* byte_ptr = reinterpret_cast<const byte*>(obj);
- return Begin() <= byte_ptr && byte_ptr < End();
- }
-
- virtual bool Contains(const mirror::Object* obj) const {
- return HasAddress(obj);
- }
-
- virtual ~ContinuousSpace() {}
-
- virtual std::string GetName() const {
- return name_;
- }
-
- virtual GcRetentionPolicy GetGcRetentionPolicy() const {
- return gc_retention_policy_;
- }
-
- protected:
- ContinuousSpace(const std::string& name, byte* begin, byte* end,
- GcRetentionPolicy gc_retention_policy);
-
- std::string name_;
- GcRetentionPolicy gc_retention_policy_;
-
- // The beginning of the storage for fast access.
- byte* begin_;
-
- // Current end of the space.
- byte* end_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ContinuousSpace);
-};
-
-class DiscontinuousSpace : public virtual Space {
- public:
- // Is object within this space?
- virtual bool Contains(const mirror::Object* obj) const = 0;
-
- virtual std::string GetName() const {
- return name_;
- }
-
- virtual GcRetentionPolicy GetGcRetentionPolicy() const {
- return gc_retention_policy_;
- }
-
-protected:
- DiscontinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy);
-
-private:
- std::string name_;
- GcRetentionPolicy gc_retention_policy_;
-
- DISALLOW_COPY_AND_ASSIGN(DiscontinuousSpace);
-};
-
-std::ostream& operator<<(std::ostream& os, const Space& space);
-
-class MemMapSpace : public ContinuousSpace {
- public:
-  // Maximum size to which the mapped space can grow.
- virtual size_t Capacity() const {
- return mem_map_->Size();
- }
-
- // Size of the space without a limit on its growth. By default this is just the Capacity, but
- // for the allocation space we support starting with a small heap and then extending it.
- virtual size_t NonGrowthLimitCapacity() const {
- return Capacity();
- }
-
- protected:
- MemMapSpace(const std::string& name, MemMap* mem_map, size_t initial_size,
- GcRetentionPolicy gc_retention_policy);
-
- MemMap* GetMemMap() {
- return mem_map_.get();
- }
-
- const MemMap* GetMemMap() const {
- return mem_map_.get();
- }
-
- private:
- // Underlying storage of the space
- UniquePtr<MemMap> mem_map_;
-
- DISALLOW_COPY_AND_ASSIGN(MemMapSpace);
-};
-
-// An alloc space is a space where objects may be allocated and garbage collected.
-class DlMallocSpace : public MemMapSpace, public AllocSpace {
- public:
- typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
-
- virtual bool CanAllocateInto() const {
- return true;
- }
-
- virtual bool IsCompactible() const {
- return false;
- }
-
- virtual SpaceType GetType() const {
- return kSpaceTypeAllocSpace;
- }
-
-  // Create an AllocSpace with the requested sizes. The requested
-  // base address is not guaranteed to be granted; if it is required,
- // the caller should call Begin on the returned space to confirm
- // the request was granted.
- static DlMallocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, byte* requested_begin);
-
-  // Allocate num_bytes allowing the underlying mspace to grow.
- virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes);
-
-  // Allocate num_bytes without allowing the underlying mspace to grow.
- virtual mirror::Object* Alloc(Thread* self, size_t num_bytes);
-
- // Return the storage space required by obj.
- virtual size_t AllocationSize(const mirror::Object* obj);
- virtual size_t Free(Thread* self, mirror::Object* ptr);
- virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs);
-
- void* MoreCore(intptr_t increment);
-
- void* GetMspace() const {
- return mspace_;
- }
-
- // Hands unused pages back to the system.
- size_t Trim();
-
-  // Perform an mspace_inspect_all which calls back for each allocation chunk. The chunk may not be
- // in use, indicated by num_bytes equaling zero.
- void Walk(WalkCallback callback, void* arg);
-
- // Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore.
- size_t GetFootprintLimit();
-
- // Set the maximum number of bytes that the heap is allowed to obtain from the system via
-  // MoreCore. Note this is used to stop the mspace from growing beyond Capacity. When
- // allocations fail we GC before increasing the footprint limit and allowing the mspace to grow.
- void SetFootprintLimit(size_t limit);
-
- // Removes the fork time growth limit on capacity, allowing the application to allocate up to the
- // maximum reserved size of the heap.
- void ClearGrowthLimit() {
- growth_limit_ = NonGrowthLimitCapacity();
- }
-
- // Override capacity so that we only return the possibly limited capacity
- virtual size_t Capacity() const {
- return growth_limit_;
- }
-
- // The total amount of memory reserved for the alloc space
- virtual size_t NonGrowthLimitCapacity() const {
- return GetMemMap()->Size();
- }
-
- virtual SpaceBitmap* GetLiveBitmap() const {
- return live_bitmap_.get();
- }
-
- virtual SpaceBitmap* GetMarkBitmap() const {
- return mark_bitmap_.get();
- }
-
- virtual void Dump(std::ostream& os) const;
-
- void SetGrowthLimit(size_t growth_limit);
-
- // Swap the live and mark bitmaps of this space. This is used by the GC for concurrent sweeping.
- virtual void SwapBitmaps();
-
- // Turn ourself into a zygote space and return a new alloc space which has our unused memory.
- DlMallocSpace* CreateZygoteSpace();
-
- void SetGcRetentionPolicy(GcRetentionPolicy gc_retention_policy) {
- gc_retention_policy_ = gc_retention_policy;
- }
-
- virtual uint64_t GetNumBytesAllocated() const {
- return num_bytes_allocated_;
- }
-
- virtual uint64_t GetNumObjectsAllocated() const {
- return num_objects_allocated_;
- }
-
- virtual uint64_t GetTotalBytesAllocated() const {
- return total_bytes_allocated_;
- }
-
- virtual uint64_t GetTotalObjectsAllocated() const {
- return total_objects_allocated_;
- }
-
- private:
- size_t InternalAllocationSize(const mirror::Object* obj);
- mirror::Object* AllocWithoutGrowthLocked(size_t num_bytes) EXCLUSIVE_LOCKS_REQUIRED(lock_);
-
- UniquePtr<SpaceBitmap> live_bitmap_;
- UniquePtr<SpaceBitmap> mark_bitmap_;
- UniquePtr<SpaceBitmap> temp_bitmap_;
-
- // Approximate number of bytes which have been allocated into the space.
- size_t num_bytes_allocated_;
- size_t num_objects_allocated_;
- size_t total_bytes_allocated_;
- size_t total_objects_allocated_;
-
- static size_t bitmap_index_;
-
- DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin, byte* end,
- size_t growth_limit);
-
- bool Init(size_t initial_size, size_t maximum_size, size_t growth_size, byte* requested_base);
-
- static void* CreateMallocSpace(void* base, size_t morecore_start, size_t initial_size);
-
- // The boundary tag overhead.
- static const size_t kChunkOverhead = kWordSize;
-
-  // Used to ensure mutual exclusion when the allocation space's data structures are being modified.
- Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
-
- // Underlying malloc space
- void* const mspace_;
-
- // The capacity of the alloc space until such time that ClearGrowthLimit is called.
- // The underlying mem_map_ controls the maximum size we allow the heap to grow to. The growth
- // limit is a value <= to the mem_map_ capacity used for ergonomic reasons because of the zygote.
- // Prior to forking the zygote the heap will have a maximally sized mem_map_ but the growth_limit_
- // will be set to a lower value. The growth_limit_ is used as the capacity of the alloc_space_,
- // however, capacity normally can't vary. In the case of the growth_limit_ it can be cleared
- // one time by a call to ClearGrowthLimit.
- size_t growth_limit_;
-
- friend class MarkSweep;
-
- DISALLOW_COPY_AND_ASSIGN(DlMallocSpace);
-};
-
-// An image space is a space backed with a memory mapped image
-class ImageSpace : public MemMapSpace {
- public:
- virtual bool CanAllocateInto() const {
- return false;
- }
-
- virtual bool IsCompactible() const {
- return false;
- }
-
- virtual SpaceType GetType() const {
- return kSpaceTypeImageSpace;
- }
-
-  // Create a Space from an image file. It cannot be used for future allocation or collected.
- static ImageSpace* Create(const std::string& image)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- const ImageHeader& GetImageHeader() const {
- return *reinterpret_cast<ImageHeader*>(Begin());
- }
-
- const std::string GetImageFilename() const {
- return GetName();
- }
-
- // Mark the objects defined in this space in the given live bitmap
- void RecordImageAllocations(SpaceBitmap* live_bitmap) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- virtual SpaceBitmap* GetLiveBitmap() const {
- return live_bitmap_.get();
- }
-
- virtual SpaceBitmap* GetMarkBitmap() const {
- // ImageSpaces have the same bitmap for both live and marked. This helps reduce the number of
- // special cases to test against.
- return live_bitmap_.get();
- }
-
- virtual void Dump(std::ostream& os) const;
-
- private:
- friend class Space;
-
- UniquePtr<SpaceBitmap> live_bitmap_;
- static size_t bitmap_index_;
-
- ImageSpace(const std::string& name, MemMap* mem_map);
-
- DISALLOW_COPY_AND_ASSIGN(ImageSpace);
-};
-
-// Callback for dlmalloc_inspect_all or mspace_inspect_all that will madvise(2) unused
-// pages back to the kernel.
-void MspaceMadviseCallback(void* start, void* end, size_t used_bytes, void* /*arg*/);
-
-} // namespace art
-
-#endif // ART_SRC_GC_SPACE_H_
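
The growth_limit_ comments above are easier to follow with numbers: the heap is mapped at its maximal size before the zygote forks, but Capacity() reports the smaller fork-time growth limit until ClearGrowthLimit() is called. A standalone sketch of that split, under assumed sizes (64 MiB mapping, 16 MiB growth limit), not the real DlMallocSpace:

#include <cstddef>

// Sketch of the capacity vs. growth-limit split.
class GrowthLimitedSpace {
 public:
  GrowthLimitedSpace(size_t map_size, size_t growth_limit)
      : map_size_(map_size), growth_limit_(growth_limit) {}

  size_t Capacity() const { return growth_limit_; }            // what allocation sees
  size_t NonGrowthLimitCapacity() const { return map_size_; }  // the full reservation
  void ClearGrowthLimit() { growth_limit_ = map_size_; }       // app may now use it all

 private:
  size_t map_size_;      // e.g. 64 MiB, fixed by the underlying mem map
  size_t growth_limit_;  // e.g. 16 MiB at fork time, always <= map_size_
};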
diff --git a/src/gc/space.cc b/src/gc/space/dlmalloc_space.cc
similarity index 63%
rename from src/gc/space.cc
rename to src/gc/space/dlmalloc_space.cc
index 1d3ee28..02acd28 100644
--- a/src/gc/space.cc
+++ b/src/gc/space/dlmalloc_space.cc
@@ -13,33 +13,19 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
-#include "space.h"
-
-#include "base/logging.h"
-#include "base/stl_util.h"
-#include "base/unix_file/fd_file.h"
-#include "card_table.h"
-#include "dlmalloc.h"
-#include "image.h"
-#include "mirror/array.h"
-#include "mirror/abstract_method.h"
-#include "mirror/class-inl.h"
-#include "mirror/object-inl.h"
-#include "os.h"
+#include "dlmalloc_space.h"
+#include "gc/accounting/card_table.h"
+#include "gc/heap.h"
#include "runtime.h"
-#include "space_bitmap.h"
-#include "space_bitmap-inl.h"
#include "thread.h"
-#include "UniquePtr.h"
#include "utils.h"
+//#include <valgrind/memcheck.h>
+#include <valgrind.h>
+
namespace art {
-
-static const bool kPrefetchDuringDlMallocFreeList = true;
-
-// Magic padding value that we use to check for buffer overruns.
-static const word kPaddingValue = 0xBAC0BAC0;
+namespace gc {
+namespace space {
// TODO: Remove define macro
#define CHECK_MEMORY_CALL(call, args, what) \
@@ -51,45 +37,86 @@
} \
} while (false)
-ImageSpace* Space::AsImageSpace() {
- DCHECK_EQ(GetType(), kSpaceTypeImageSpace);
- return down_cast<ImageSpace*>(down_cast<MemMapSpace*>(this));
-}
+static const bool kPrefetchDuringDlMallocFreeList = true;
-DlMallocSpace* Space::AsAllocSpace() {
- DCHECK_EQ(GetType(), kSpaceTypeAllocSpace);
- return down_cast<DlMallocSpace*>(down_cast<MemMapSpace*>(this));
-}
+// Number of bytes to use as a red zone (rdz). A red zone of this size will be placed before and
+// after each allocation. 8 bytes provides long/double alignment.
+const size_t kValgrindRedZoneBytes = 8;
-DlMallocSpace* Space::AsZygoteSpace() {
- DCHECK_EQ(GetType(), kSpaceTypeZygoteSpace);
- return down_cast<DlMallocSpace*>(down_cast<MemMapSpace*>(this));
-}
+// A specialization of DlMallocSpace that provides information to valgrind wrt allocations.
+class ValgrindDlMallocSpace : public DlMallocSpace {
+ public:
+ virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes) {
+ void* obj_with_rdz = DlMallocSpace::AllocWithGrowth(self, num_bytes + (2 * kValgrindRedZoneBytes));
+ if (obj_with_rdz != NULL) {
+ //VALGRIND_MAKE_MEM_UNDEFINED();
+ mirror::Object* result = reinterpret_cast<mirror::Object*>(reinterpret_cast<byte*>(obj_with_rdz) +
+ kValgrindRedZoneBytes);
+ VALGRIND_MEMPOOL_ALLOC(GetMspace(), result, num_bytes);
+ LOG(INFO) << "AllocWithGrowth on " << self << " = " << obj_with_rdz
+ << " of size " << num_bytes;
+ return result;
+ } else {
+ return NULL;
+ }
+ }
-LargeObjectSpace* Space::AsLargeObjectSpace() {
- DCHECK_EQ(GetType(), kSpaceTypeLargeObjectSpace);
- return reinterpret_cast<LargeObjectSpace*>(this);
-}
+ virtual mirror::Object* Alloc(Thread* self, size_t num_bytes) {
+ void* obj_with_rdz = DlMallocSpace::Alloc(self, num_bytes + (2 * kValgrindRedZoneBytes));
+ if (obj_with_rdz != NULL) {
+ mirror::Object* result = reinterpret_cast<mirror::Object*>(reinterpret_cast<byte*>(obj_with_rdz) +
+ kValgrindRedZoneBytes);
+ VALGRIND_MEMPOOL_ALLOC(GetMspace(), result, num_bytes);
+ LOG(INFO) << "Alloc on " << self << " = " << obj_with_rdz
+ << " of size " << num_bytes;
+ return result;
+ } else {
+ return NULL;
+ }
+ }
-ContinuousSpace::ContinuousSpace(const std::string& name, byte* begin, byte* end,
- GcRetentionPolicy gc_retention_policy)
- : name_(name), gc_retention_policy_(gc_retention_policy), begin_(begin), end_(end) {
+ virtual size_t AllocationSize(const mirror::Object* obj) {
+ const void* obj_after_rdz = reinterpret_cast<const void*>(obj);
+ size_t result = DlMallocSpace::AllocationSize(
+ reinterpret_cast<const mirror::Object*>(reinterpret_cast<const byte*>(obj_after_rdz) -
+ kValgrindRedZoneBytes));
+ return result - (2 * kValgrindRedZoneBytes);
+ }
-}
+ virtual size_t Free(Thread* self, mirror::Object* ptr) {
+ void* obj_after_rdz = reinterpret_cast<void*>(ptr);
+ void* obj_with_rdz = reinterpret_cast<byte*>(obj_after_rdz) - kValgrindRedZoneBytes;
+ LOG(INFO) << "Free on " << self << " of " << obj_with_rdz;
+ size_t freed = DlMallocSpace::Free(self, reinterpret_cast<mirror::Object*>(obj_with_rdz));
+ VALGRIND_MEMPOOL_FREE(GetMspace(), obj_after_rdz);
+ return freed - (2 * kValgrindRedZoneBytes);
+ }
-DiscontinuousSpace::DiscontinuousSpace(const std::string& name,
- GcRetentionPolicy gc_retention_policy)
- : name_(name), gc_retention_policy_(gc_retention_policy) {
+ virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
+ size_t freed = 0;
+ for (size_t i = 0; i < num_ptrs; i++) {
+ void* obj_after_rdz = reinterpret_cast<void*>(ptrs[i]);
+ void* obj_with_rdz = reinterpret_cast<byte*>(obj_after_rdz) - kValgrindRedZoneBytes;
+ LOG(INFO) << "FreeList on " << self << " of " << obj_with_rdz;
+ freed += DlMallocSpace::Free(self, reinterpret_cast<mirror::Object*>(obj_with_rdz));
+ VALGRIND_MEMPOOL_FREE(GetMspace(), obj_after_rdz);
+ }
+ return freed - (2 * kValgrindRedZoneBytes * num_ptrs);
+ }
-}
+ ValgrindDlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin,
+ byte* end, size_t growth_limit) :
+ DlMallocSpace(name, mem_map, mspace, begin, end, growth_limit) {
+ VALGRIND_CREATE_MEMPOOL(GetMspace(), kValgrindRedZoneBytes, true);
+ }
-MemMapSpace::MemMapSpace(const std::string& name, MemMap* mem_map, size_t initial_size,
- GcRetentionPolicy gc_retention_policy)
- : ContinuousSpace(name, mem_map->Begin(), mem_map->Begin() + initial_size, gc_retention_policy),
- mem_map_(mem_map)
-{
+ virtual ~ValgrindDlMallocSpace() {
+ VALGRIND_DESTROY_MEMPOOL(GetMspace());
+ }
-}
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ValgrindDlMallocSpace);
+};
size_t DlMallocSpace::bitmap_index_ = 0;
@@ -103,15 +130,15 @@
size_t bitmap_index = bitmap_index_++;
- static const uintptr_t kGcCardSize = static_cast<uintptr_t>(CardTable::kCardSize);
+ static const uintptr_t kGcCardSize = static_cast<uintptr_t>(accounting::CardTable::kCardSize);
CHECK(reinterpret_cast<uintptr_t>(mem_map->Begin()) % kGcCardSize == 0);
CHECK(reinterpret_cast<uintptr_t>(mem_map->End()) % kGcCardSize == 0);
- live_bitmap_.reset(SpaceBitmap::Create(
+ live_bitmap_.reset(accounting::SpaceBitmap::Create(
StringPrintf("allocspace %s live-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
Begin(), Capacity()));
DCHECK(live_bitmap_.get() != NULL) << "could not create allocspace live bitmap #" << bitmap_index;
- mark_bitmap_.reset(SpaceBitmap::Create(
+ mark_bitmap_.reset(accounting::SpaceBitmap::Create(
StringPrintf("allocspace %s mark-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
Begin(), Capacity()));
DCHECK(live_bitmap_.get() != NULL) << "could not create allocspace mark bitmap #" << bitmap_index;
@@ -177,8 +204,13 @@
// Everything is set so record in immutable structure and leave
MemMap* mem_map_ptr = mem_map.release();
- DlMallocSpace* space = new DlMallocSpace(name, mem_map_ptr, mspace, mem_map_ptr->Begin(), end,
- growth_limit);
+ DlMallocSpace* space;
+ if (RUNNING_ON_VALGRIND > 0) {
+ space = new ValgrindDlMallocSpace(name, mem_map_ptr, mspace, mem_map_ptr->Begin(), end,
+ growth_limit);
+ } else {
+ space = new DlMallocSpace(name, mem_map_ptr, mspace, mem_map_ptr->Begin(), end, growth_limit);
+ }
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Space::CreateAllocSpace exiting (" << PrettyDuration(NanoTime() - start_time)
<< " ) " << *space;
@@ -203,33 +235,26 @@
}
void DlMallocSpace::SwapBitmaps() {
- SpaceBitmap* temp_live_bitmap = live_bitmap_.release();
- live_bitmap_.reset(mark_bitmap_.release());
- mark_bitmap_.reset(temp_live_bitmap);
+ live_bitmap_.swap(mark_bitmap_);
// Swap names to get more descriptive diagnostics.
- std::string temp_name = live_bitmap_->GetName();
+ std::string temp_name(live_bitmap_->GetName());
live_bitmap_->SetName(mark_bitmap_->GetName());
mark_bitmap_->SetName(temp_name);
}
mirror::Object* DlMallocSpace::AllocWithoutGrowthLocked(size_t num_bytes) {
- if (kDebugSpaces) {
- num_bytes += sizeof(word);
- }
-
mirror::Object* result = reinterpret_cast<mirror::Object*>(mspace_calloc(mspace_, 1, num_bytes));
- if (kDebugSpaces && result != NULL) {
- CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
- << ") not in bounds of allocation space " << *this;
- // Put a magic pattern before and after the allocation.
- *reinterpret_cast<word*>(reinterpret_cast<byte*>(result) + AllocationSize(result)
- - sizeof(word) - kChunkOverhead) = kPaddingValue;
+ if (result != NULL) {
+ if (kDebugSpaces) {
+ CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
+ << ") not in bounds of allocation space " << *this;
+ }
+ size_t allocation_size = AllocationSize(result);
+ num_bytes_allocated_ += allocation_size;
+ total_bytes_allocated_ += allocation_size;
+ ++total_objects_allocated_;
+ ++num_objects_allocated_;
}
- size_t allocation_size = AllocationSize(result);
- num_bytes_allocated_ += allocation_size;
- total_bytes_allocated_ += allocation_size;
- ++total_objects_allocated_;
- ++num_objects_allocated_;
return result;
}
@@ -263,8 +288,8 @@
DlMallocSpace* DlMallocSpace::CreateZygoteSpace() {
end_ = reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(end_), kPageSize));
- DCHECK(IsAligned<CardTable::kCardSize>(begin_));
- DCHECK(IsAligned<CardTable::kCardSize>(end_));
+ DCHECK(IsAligned<accounting::CardTable::kCardSize>(begin_));
+ DCHECK(IsAligned<accounting::CardTable::kCardSize>(end_));
DCHECK(IsAligned<kPageSize>(begin_));
DCHECK(IsAligned<kPageSize>(end_));
size_t size = RoundUp(Size(), kPageSize);
@@ -291,7 +316,7 @@
VLOG(heap) << "Size " << GetMemMap()->Size();
VLOG(heap) << "GrowthLimit " << PrettySize(growth_limit);
VLOG(heap) << "Capacity " << PrettySize(capacity);
- UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(GetName().c_str(), End(), capacity, PROT_READ | PROT_WRITE));
+ UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(GetName(), End(), capacity, PROT_READ | PROT_WRITE));
void* mspace = CreateMallocSpace(end_, starting_size, initial_size);
// Protect memory beyond the initial size.
byte* end = mem_map->Begin() + starting_size;
@@ -314,9 +339,6 @@
if (kDebugSpaces) {
CHECK(ptr != NULL);
CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
- CHECK_EQ(
- *reinterpret_cast<word*>(reinterpret_cast<byte*>(ptr) + AllocationSize(ptr) -
- sizeof(word) - kChunkOverhead), kPaddingValue);
}
const size_t bytes_freed = InternalAllocationSize(ptr);
num_bytes_allocated_ -= bytes_freed;
@@ -374,20 +396,16 @@
lock_.AssertHeld(Thread::Current());
byte* original_end = end_;
if (increment != 0) {
- VLOG(heap) << "AllocSpace::MoreCore " << PrettySize(increment);
+ VLOG(heap) << "DlMallocSpace::MoreCore " << PrettySize(increment);
byte* new_end = original_end + increment;
if (increment > 0) {
-#if DEBUG_SPACES
// Should never be asked to increase the allocation beyond the capacity of the space. Enforced
// by mspace_set_footprint_limit.
CHECK_LE(new_end, Begin() + Capacity());
-#endif
CHECK_MEMORY_CALL(mprotect, (original_end, increment, PROT_READ | PROT_WRITE), GetName());
} else {
-#if DEBUG_SPACES
// Should never be asked for negative footprint (ie before begin)
CHECK_GT(original_end + increment, Begin());
-#endif
// Advise we don't need the pages and protect them
// TODO: by removing permissions to the pages we may be causing TLB shoot-down which can be
// expensive (note the same isn't true for giving permissions to a page as the protected
@@ -414,29 +432,13 @@
return InternalAllocationSize(obj);
}
-void MspaceMadviseCallback(void* start, void* end, size_t used_bytes, void* arg) {
- // Is this chunk in use?
- if (used_bytes != 0) {
- return;
- }
- // Do we have any whole pages to give back?
- start = reinterpret_cast<void*>(RoundUp(reinterpret_cast<uintptr_t>(start), kPageSize));
- end = reinterpret_cast<void*>(RoundDown(reinterpret_cast<uintptr_t>(end), kPageSize));
- if (end > start) {
- size_t length = reinterpret_cast<byte*>(end) - reinterpret_cast<byte*>(start);
- CHECK_MEMORY_CALL(madvise, (start, length, MADV_DONTNEED), "trim");
- size_t* reclaimed = reinterpret_cast<size_t*>(arg);
- *reclaimed += length;
- }
-}
-
size_t DlMallocSpace::Trim() {
MutexLock mu(Thread::Current(), lock_);
// Trim to release memory at the end of the space.
mspace_trim(mspace_, 0);
// Visit space looking for page-sized holes to advise the kernel we don't need.
size_t reclaimed = 0;
- mspace_inspect_all(mspace_, MspaceMadviseCallback, &reclaimed);
+ mspace_inspect_all(mspace_, DlmallocMadviseCallback, &reclaimed);
return reclaimed;
}
@@ -465,111 +467,14 @@
mspace_set_footprint_limit(mspace_, new_size);
}
-size_t ImageSpace::bitmap_index_ = 0;
-
-ImageSpace::ImageSpace(const std::string& name, MemMap* mem_map)
- : MemMapSpace(name, mem_map, mem_map->Size(), kGcRetentionPolicyNeverCollect) {
- const size_t bitmap_index = bitmap_index_++;
- live_bitmap_.reset(SpaceBitmap::Create(
- StringPrintf("imagespace %s live-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
- Begin(), Capacity()));
- DCHECK(live_bitmap_.get() != NULL) << "could not create imagespace live bitmap #" << bitmap_index;
-}
-
-ImageSpace* ImageSpace::Create(const std::string& image_file_name) {
- CHECK(!image_file_name.empty());
-
- uint64_t start_time = 0;
- if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
- start_time = NanoTime();
- LOG(INFO) << "Space::CreateImageSpace entering" << " image_file_name=" << image_file_name;
- }
-
- UniquePtr<File> file(OS::OpenFile(image_file_name.c_str(), false));
- if (file.get() == NULL) {
- LOG(ERROR) << "Failed to open " << image_file_name;
- return NULL;
- }
- ImageHeader image_header;
- bool success = file->ReadFully(&image_header, sizeof(image_header));
- if (!success || !image_header.IsValid()) {
- LOG(ERROR) << "Invalid image header " << image_file_name;
- return NULL;
- }
- UniquePtr<MemMap> map(MemMap::MapFileAtAddress(image_header.GetImageBegin(),
- file->GetLength(),
- PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_FIXED,
- file->Fd(),
- 0,
- false));
- if (map.get() == NULL) {
- LOG(ERROR) << "Failed to map " << image_file_name;
- return NULL;
- }
- CHECK_EQ(image_header.GetImageBegin(), map->Begin());
- DCHECK_EQ(0, memcmp(&image_header, map->Begin(), sizeof(ImageHeader)));
-
- Runtime* runtime = Runtime::Current();
- mirror::Object* resolution_method = image_header.GetImageRoot(ImageHeader::kResolutionMethod);
- runtime->SetResolutionMethod(down_cast<mirror::AbstractMethod*>(resolution_method));
-
- mirror::Object* callee_save_method = image_header.GetImageRoot(ImageHeader::kCalleeSaveMethod);
- runtime->SetCalleeSaveMethod(down_cast<mirror::AbstractMethod*>(callee_save_method), Runtime::kSaveAll);
- callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsOnlySaveMethod);
- runtime->SetCalleeSaveMethod(down_cast<mirror::AbstractMethod*>(callee_save_method), Runtime::kRefsOnly);
- callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsAndArgsSaveMethod);
- runtime->SetCalleeSaveMethod(down_cast<mirror::AbstractMethod*>(callee_save_method), Runtime::kRefsAndArgs);
-
- ImageSpace* space = new ImageSpace(image_file_name, map.release());
- if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
- LOG(INFO) << "Space::CreateImageSpace exiting (" << PrettyDuration(NanoTime() - start_time)
- << ") " << *space;
- }
- return space;
-}
-
-void ImageSpace::RecordImageAllocations(SpaceBitmap* live_bitmap) const {
- uint64_t start_time = 0;
- if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
- LOG(INFO) << "ImageSpace::RecordImageAllocations entering";
- start_time = NanoTime();
- }
- DCHECK(!Runtime::Current()->IsStarted());
- CHECK(live_bitmap != NULL);
- byte* current = Begin() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
- byte* end = End();
- while (current < end) {
- DCHECK_ALIGNED(current, kObjectAlignment);
- const mirror::Object* obj = reinterpret_cast<const mirror::Object*>(current);
- live_bitmap->Set(obj);
- current += RoundUp(obj->SizeOf(), kObjectAlignment);
- }
- if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
- LOG(INFO) << "ImageSpace::RecordImageAllocations exiting ("
- << PrettyDuration(NanoTime() - start_time) << ")";
- }
-}
-
-std::ostream& operator<<(std::ostream& os, const Space& space) {
- space.Dump(os);
- return os;
-}
-
void DlMallocSpace::Dump(std::ostream& os) const {
os << GetType()
- << "begin=" << reinterpret_cast<void*>(Begin())
+ << " begin=" << reinterpret_cast<void*>(Begin())
<< ",end=" << reinterpret_cast<void*>(End())
<< ",size=" << PrettySize(Size()) << ",capacity=" << PrettySize(Capacity())
<< ",name=\"" << GetName() << "\"]";
}
-void ImageSpace::Dump(std::ostream& os) const {
- os << GetType()
- << "begin=" << reinterpret_cast<void*>(Begin())
- << ",end=" << reinterpret_cast<void*>(End())
- << ",size=" << PrettySize(Size())
- << ",name=\"" << GetName() << "\"]";
-}
-
+} // namespace space
+} // namespace gc
} // namespace art
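
The Valgrind wrapper above brackets each allocation with red zones, and the pointer arithmetic is easy to get backwards. Here is a minimal standalone sketch of the same scheme with no Valgrind client calls; the red-zone size matches the constant assumed in the diff, and malloc/free stand in for the underlying mspace allocator.

#include <cstddef>
#include <cstdlib>

static const size_t kRedZoneBytes = 8;  // assumed red-zone size, as in the diff

// Ask the underlying allocator for the payload plus both red zones; return the payload.
void* AllocWithRedZones(size_t num_bytes) {
  unsigned char* with_rdz =
      static_cast<unsigned char*>(std::malloc(num_bytes + 2 * kRedZoneBytes));
  if (with_rdz == NULL) {
    return NULL;
  }
  return with_rdz + kRedZoneBytes;  // caller only ever sees the payload
}

// Step back over the leading red zone to recover the real allocation start.
void FreeWithRedZones(void* payload) {
  if (payload != NULL) {
    std::free(static_cast<unsigned char*>(payload) - kRedZoneBytes);
  }
}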
diff --git a/src/gc/space/dlmalloc_space.h b/src/gc/space/dlmalloc_space.h
new file mode 100644
index 0000000..00df0e6
--- /dev/null
+++ b/src/gc/space/dlmalloc_space.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_SPACE_DLMALLOC_SPACE_H_
+#define ART_SRC_GC_SPACE_DLMALLOC_SPACE_H_
+
+#include "gc/allocator/dlmalloc.h"
+#include "space.h"
+
+namespace art {
+namespace gc {
+
+namespace collector {
+ class MarkSweep;
+} // namespace collector
+
+namespace space {
+
+// An alloc space is a space where objects may be allocated and garbage collected.
+class DlMallocSpace : public MemMapSpace, public AllocSpace {
+ public:
+ typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
+
+ SpaceType GetType() const {
+ if (GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect) {
+ return kSpaceTypeZygoteSpace;
+ } else {
+ return kSpaceTypeAllocSpace;
+ }
+ }
+
+  // Create an AllocSpace with the requested sizes. The requested
+  // base address is not guaranteed to be granted; if it is required,
+ // the caller should call Begin on the returned space to confirm
+ // the request was granted.
+ static DlMallocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
+ size_t capacity, byte* requested_begin);
+
+  // Allocate num_bytes allowing the underlying mspace to grow.
+ virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes);
+
+  // Allocate num_bytes without allowing the underlying mspace to grow.
+ virtual mirror::Object* Alloc(Thread* self, size_t num_bytes);
+
+ // Return the storage space required by obj.
+ virtual size_t AllocationSize(const mirror::Object* obj);
+ virtual size_t Free(Thread* self, mirror::Object* ptr);
+ virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs);
+
+ void* MoreCore(intptr_t increment);
+
+ void* GetMspace() const {
+ return mspace_;
+ }
+
+ // Hands unused pages back to the system.
+ size_t Trim();
+
+  // Perform an mspace_inspect_all which calls back for each allocation chunk. The chunk may not be
+ // in use, indicated by num_bytes equaling zero.
+ void Walk(WalkCallback callback, void* arg);
+
+ // Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore.
+ size_t GetFootprintLimit();
+
+ // Set the maximum number of bytes that the heap is allowed to obtain from the system via
+  // MoreCore. Note this is used to stop the mspace from growing beyond Capacity. When
+ // allocations fail we GC before increasing the footprint limit and allowing the mspace to grow.
+ void SetFootprintLimit(size_t limit);
+
+ // Removes the fork time growth limit on capacity, allowing the application to allocate up to the
+ // maximum reserved size of the heap.
+ void ClearGrowthLimit() {
+ growth_limit_ = NonGrowthLimitCapacity();
+ }
+
+ // Override capacity so that we only return the possibly limited capacity
+ size_t Capacity() const {
+ return growth_limit_;
+ }
+
+ // The total amount of memory reserved for the alloc space.
+ size_t NonGrowthLimitCapacity() const {
+ return GetMemMap()->Size();
+ }
+
+ accounting::SpaceBitmap* GetLiveBitmap() const {
+ return live_bitmap_.get();
+ }
+
+ accounting::SpaceBitmap* GetMarkBitmap() const {
+ return mark_bitmap_.get();
+ }
+
+ void Dump(std::ostream& os) const;
+
+ void SetGrowthLimit(size_t growth_limit);
+
+ // Swap the live and mark bitmaps of this space. This is used by the GC for concurrent sweeping.
+ void SwapBitmaps();
+
+ // Turn ourself into a zygote space and return a new alloc space which has our unused memory.
+ DlMallocSpace* CreateZygoteSpace();
+
+ uint64_t GetBytesAllocated() const {
+ return num_bytes_allocated_;
+ }
+
+ uint64_t GetObjectsAllocated() const {
+ return num_objects_allocated_;
+ }
+
+ uint64_t GetTotalBytesAllocated() const {
+ return total_bytes_allocated_;
+ }
+
+ uint64_t GetTotalObjectsAllocated() const {
+ return total_objects_allocated_;
+ }
+
+ protected:
+ DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin, byte* end,
+ size_t growth_limit);
+
+ private:
+ size_t InternalAllocationSize(const mirror::Object* obj);
+ mirror::Object* AllocWithoutGrowthLocked(size_t num_bytes) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+ bool Init(size_t initial_size, size_t maximum_size, size_t growth_size, byte* requested_base);
+
+ static void* CreateMallocSpace(void* base, size_t morecore_start, size_t initial_size);
+
+ UniquePtr<accounting::SpaceBitmap> live_bitmap_;
+ UniquePtr<accounting::SpaceBitmap> mark_bitmap_;
+ UniquePtr<accounting::SpaceBitmap> temp_bitmap_;
+
+ // Approximate number of bytes which have been allocated into the space.
+ size_t num_bytes_allocated_;
+ size_t num_objects_allocated_;
+ size_t total_bytes_allocated_;
+ size_t total_objects_allocated_;
+
+ static size_t bitmap_index_;
+
+ // The boundary tag overhead.
+ static const size_t kChunkOverhead = kWordSize;
+
+  // Used to ensure mutual exclusion when the allocation space's data structures are being modified.
+ Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+
+ // Underlying malloc space
+ void* const mspace_;
+
+ // The capacity of the alloc space until such time that ClearGrowthLimit is called.
+ // The underlying mem_map_ controls the maximum size we allow the heap to grow to. The growth
+ // limit is a value <= to the mem_map_ capacity used for ergonomic reasons because of the zygote.
+ // Prior to forking the zygote the heap will have a maximally sized mem_map_ but the growth_limit_
+ // will be set to a lower value. The growth_limit_ is used as the capacity of the alloc_space_,
+ // however, capacity normally can't vary. In the case of the growth_limit_ it can be cleared
+ // one time by a call to ClearGrowthLimit.
+ size_t growth_limit_;
+
+ friend class collector::MarkSweep;
+
+ DISALLOW_COPY_AND_ASSIGN(DlMallocSpace);
+};
+
+} // namespace space
+} // namespace gc
+} // namespace art
+
+#endif // ART_SRC_GC_SPACE_DLMALLOC_SPACE_H_
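
Note that the new GetType() above stores no separate zygote flag: a dlmalloc-backed space whose retention policy is full-collect-only is taken to be the zygote space. A compact restatement of that rule, reusing the enum names from the deleted space.h, purely as an illustration:

enum GcRetentionPolicy {
  kGcRetentionPolicyNeverCollect,
  kGcRetentionPolicyAlwaysCollect,
  kGcRetentionPolicyFullCollect,  // collect only for full GC
};

enum SpaceType {
  kSpaceTypeImageSpace,
  kSpaceTypeAllocSpace,
  kSpaceTypeZygoteSpace,
  kSpaceTypeLargeObjectSpace,
};

// Sketch: how a dlmalloc-backed space's type follows from its retention policy.
SpaceType DlMallocSpaceType(GcRetentionPolicy policy) {
  return policy == kGcRetentionPolicyFullCollect ? kSpaceTypeZygoteSpace
                                                 : kSpaceTypeAllocSpace;
}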
diff --git a/src/gc/space/image_space.cc b/src/gc/space/image_space.cc
new file mode 100644
index 0000000..46c3937
--- /dev/null
+++ b/src/gc/space/image_space.cc
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "image_space.h"
+
+#include "base/unix_file/fd_file.h"
+#include "gc/accounting/space_bitmap-inl.h"
+#include "mirror/abstract_method.h"
+#include "mirror/class-inl.h"
+#include "mirror/object-inl.h"
+#include "os.h"
+#include "runtime.h"
+#include "space-inl.h"
+#include "utils.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+size_t ImageSpace::bitmap_index_ = 0;
+
+ImageSpace::ImageSpace(const std::string& name, MemMap* mem_map)
+    : MemMapSpace(name, mem_map, mem_map->Size(), kGcRetentionPolicyNeverCollect) {
+ const size_t bitmap_index = bitmap_index_++;
+ live_bitmap_.reset(accounting::SpaceBitmap::Create(
+ StringPrintf("imagespace %s live-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
+ Begin(), Capacity()));
+ DCHECK(live_bitmap_.get() != NULL) << "could not create imagespace live bitmap #" << bitmap_index;
+}
+
+ImageSpace* ImageSpace::Create(const std::string& image_file_name) {
+ CHECK(!image_file_name.empty());
+
+ uint64_t start_time = 0;
+ if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
+ start_time = NanoTime();
+ LOG(INFO) << "Space::CreateImageSpace entering" << " image_file_name=" << image_file_name;
+ }
+
+ UniquePtr<File> file(OS::OpenFile(image_file_name.c_str(), false));
+ if (file.get() == NULL) {
+ LOG(ERROR) << "Failed to open " << image_file_name;
+ return NULL;
+ }
+ ImageHeader image_header;
+ bool success = file->ReadFully(&image_header, sizeof(image_header));
+ if (!success || !image_header.IsValid()) {
+ LOG(ERROR) << "Invalid image header " << image_file_name;
+ return NULL;
+ }
+ UniquePtr<MemMap> map(MemMap::MapFileAtAddress(image_header.GetImageBegin(),
+ file->GetLength(),
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_FIXED,
+ file->Fd(),
+ 0,
+ false));
+ if (map.get() == NULL) {
+ LOG(ERROR) << "Failed to map " << image_file_name;
+ return NULL;
+ }
+ CHECK_EQ(image_header.GetImageBegin(), map->Begin());
+ DCHECK_EQ(0, memcmp(&image_header, map->Begin(), sizeof(ImageHeader)));
+
+ Runtime* runtime = Runtime::Current();
+ mirror::Object* resolution_method = image_header.GetImageRoot(ImageHeader::kResolutionMethod);
+ runtime->SetResolutionMethod(down_cast<mirror::AbstractMethod*>(resolution_method));
+
+ mirror::Object* callee_save_method = image_header.GetImageRoot(ImageHeader::kCalleeSaveMethod);
+ runtime->SetCalleeSaveMethod(down_cast<mirror::AbstractMethod*>(callee_save_method), Runtime::kSaveAll);
+ callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsOnlySaveMethod);
+ runtime->SetCalleeSaveMethod(down_cast<mirror::AbstractMethod*>(callee_save_method), Runtime::kRefsOnly);
+ callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsAndArgsSaveMethod);
+ runtime->SetCalleeSaveMethod(down_cast<mirror::AbstractMethod*>(callee_save_method), Runtime::kRefsAndArgs);
+
+ ImageSpace* space = new ImageSpace(image_file_name, map.release());
+ if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
+ LOG(INFO) << "Space::CreateImageSpace exiting (" << PrettyDuration(NanoTime() - start_time)
+ << ") " << *space;
+ }
+ return space;
+}
+
+void ImageSpace::RecordImageAllocations(accounting::SpaceBitmap* live_bitmap) const {
+ uint64_t start_time = 0;
+ if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
+ LOG(INFO) << "ImageSpace::RecordImageAllocations entering";
+ start_time = NanoTime();
+ }
+ DCHECK(!Runtime::Current()->IsStarted());
+ CHECK(live_bitmap != NULL);
+ byte* current = Begin() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
+ byte* end = End();
+ while (current < end) {
+ DCHECK_ALIGNED(current, kObjectAlignment);
+ const mirror::Object* obj = reinterpret_cast<const mirror::Object*>(current);
+ live_bitmap->Set(obj);
+ current += RoundUp(obj->SizeOf(), kObjectAlignment);
+ }
+ if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
+ LOG(INFO) << "ImageSpace::RecordImageAllocations exiting ("
+ << PrettyDuration(NanoTime() - start_time) << ")";
+ }
+}
+
+void ImageSpace::Dump(std::ostream& os) const {
+ os << GetType()
+ << " [begin=" << reinterpret_cast<void*>(Begin())
+ << ",end=" << reinterpret_cast<void*>(End())
+ << ",size=" << PrettySize(Size())
+ << ",name=\"" << GetName() << "\"]";
+}
+
+} // namespace space
+} // namespace gc
+} // namespace art
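RecordImageAllocations above assumes the image is a dense run of kObjectAlignment-aligned objects starting after the header. The following standalone sketch shows the same walk over a fake buffer; FakeObject and its size field are hypothetical stand-ins for mirror::Object and SizeOf().

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <new>
#include <vector>

// Stand-ins for ART types; only the walk pattern is the point here.
static const std::size_t kObjectAlignment = 8;

static std::size_t RoundUp(std::size_t x, std::size_t n) {  // n must be a power of two.
  return (x + n - 1) & ~(n - 1);
}

struct FakeObject {
  std::uint32_t size_in_bytes;  // Stand-in for mirror::Object::SizeOf().
};

int main() {
  // Build a dense region of three "objects", each occupying 16 aligned bytes.
  std::vector<std::uint8_t> region(3 * 16, 0);
  for (std::size_t offset = 0; offset < region.size(); offset += 16) {
    new (&region[offset]) FakeObject{12};
  }

  // Same shape as the image walk: start at the first object, advance by the
  // rounded-up object size, and visit ("mark") each object along the way.
  std::uint8_t* current = region.data();
  std::uint8_t* end = region.data() + region.size();
  std::size_t visited = 0;
  while (current < end) {
    const FakeObject* obj = reinterpret_cast<const FakeObject*>(current);
    ++visited;  // In ART this would be live_bitmap->Set(obj).
    current += RoundUp(obj->size_in_bytes, kObjectAlignment);
  }
  std::printf("visited %zu objects\n", visited);  // Prints 3.
  return 0;
}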
diff --git a/src/gc/space/image_space.h b/src/gc/space/image_space.h
new file mode 100644
index 0000000..afec5b7
--- /dev/null
+++ b/src/gc/space/image_space.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_SPACE_IMAGE_SPACE_H_
+#define ART_SRC_GC_SPACE_IMAGE_SPACE_H_
+
+#include "space.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+// An image space is a space backed with a memory mapped image.
+class ImageSpace : public MemMapSpace {
+ public:
+ bool CanAllocateInto() const {
+ return false;
+ }
+
+ SpaceType GetType() const {
+ return kSpaceTypeImageSpace;
+ }
+
+ // Create a Space from an image file. Cannot be used for future allocation or collection.
+ static ImageSpace* Create(const std::string& image)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ const ImageHeader& GetImageHeader() const {
+ return *reinterpret_cast<ImageHeader*>(Begin());
+ }
+
+ const std::string GetImageFilename() const {
+ return GetName();
+ }
+
+ // Mark the objects defined in this space in the given live bitmap
+ void RecordImageAllocations(accounting::SpaceBitmap* live_bitmap) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ accounting::SpaceBitmap* GetLiveBitmap() const {
+ return live_bitmap_.get();
+ }
+
+ accounting::SpaceBitmap* GetMarkBitmap() const {
+ // ImageSpaces have the same bitmap for both live and marked. This helps reduce the number of
+ // special cases to test against.
+ return live_bitmap_.get();
+ }
+
+ void Dump(std::ostream& os) const;
+
+ private:
+ friend class Space;
+
+ static size_t bitmap_index_;
+
+ UniquePtr<accounting::SpaceBitmap> live_bitmap_;
+
+ ImageSpace(const std::string& name, MemMap* mem_map);
+
+ DISALLOW_COPY_AND_ASSIGN(ImageSpace);
+};
+
+} // namespace space
+} // namespace gc
+} // namespace art
+
+#endif // ART_SRC_GC_SPACE_IMAGE_SPACE_H_
diff --git a/src/gc/large_object_space.cc b/src/gc/space/large_object_space.cc
similarity index 97%
rename from src/gc/large_object_space.cc
rename to src/gc/space/large_object_space.cc
index c3bf382..3cee1b7 100644
--- a/src/gc/large_object_space.cc
+++ b/src/gc/space/large_object_space.cc
@@ -14,18 +14,19 @@
* limitations under the License.
*/
+#include "large_object_space.h"
+
#include "base/logging.h"
#include "base/stl_util.h"
-#include "large_object_space.h"
#include "UniquePtr.h"
-#include "dlmalloc.h"
#include "image.h"
#include "os.h"
-#include "space_bitmap.h"
#include "thread.h"
#include "utils.h"
namespace art {
+namespace gc {
+namespace space {
void LargeObjectSpace::SwapBitmaps() {
live_objects_.swap(mark_objects_);
@@ -39,8 +40,6 @@
: DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
total_objects_allocated_(0) {
- live_objects_.reset(new SpaceSetMap("large live objects"));
- mark_objects_.reset(new SpaceSetMap("large marked objects"));
}
@@ -281,4 +280,6 @@
<< " end: " << reinterpret_cast<void*>(End());
}
-}
+} // namespace space
+} // namespace gc
+} // namespace art
diff --git a/src/gc/large_object_space.h b/src/gc/space/large_object_space.h
similarity index 84%
rename from src/gc/large_object_space.h
rename to src/gc/space/large_object_space.h
index 8a2f970..197fad3 100644
--- a/src/gc/large_object_space.h
+++ b/src/gc/space/large_object_space.h
@@ -14,51 +14,38 @@
* limitations under the License.
*/
-#ifndef ART_SRC_GC_LARGE_OBJECT_SPACE_H_
-#define ART_SRC_GC_LARGE_OBJECT_SPACE_H_
+#ifndef ART_SRC_GC_SPACE_LARGE_OBJECT_SPACE_H_
+#define ART_SRC_GC_SPACE_LARGE_OBJECT_SPACE_H_
-#include "space.h"
+
+#include "dlmalloc_space.h"
#include "safe_map.h"
+#include "space.h"
#include <set>
#include <vector>
namespace art {
-class SpaceSetMap;
+namespace gc {
+namespace space {
// Abstraction implemented by all large object spaces.
class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
public:
- virtual bool CanAllocateInto() const {
- return true;
- }
-
- virtual bool IsCompactible() const {
- return true;
- }
-
virtual SpaceType GetType() const {
return kSpaceTypeLargeObjectSpace;
}
- virtual SpaceSetMap* GetLiveObjects() const {
- return live_objects_.get();
- }
-
- virtual SpaceSetMap* GetMarkObjects() const {
- return mark_objects_.get();
- }
-
virtual void SwapBitmaps();
virtual void CopyLiveToMarked();
virtual void Walk(DlMallocSpace::WalkCallback, void* arg) = 0;
virtual ~LargeObjectSpace() {}
- uint64_t GetNumBytesAllocated() const {
+ uint64_t GetBytesAllocated() const {
return num_bytes_allocated_;
}
- uint64_t GetNumObjectsAllocated() const {
+ uint64_t GetObjectsAllocated() const {
return num_objects_allocated_;
}
@@ -82,10 +69,10 @@
size_t total_bytes_allocated_;
size_t total_objects_allocated_;
- UniquePtr<SpaceSetMap> live_objects_;
- UniquePtr<SpaceSetMap> mark_objects_;
-
friend class Space;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(LargeObjectSpace);
};
// A discontinuous large object space implemented by individual mmap/munmap calls.
@@ -96,12 +83,13 @@
static LargeObjectMapSpace* Create(const std::string& name);
// Return the storage space required by obj.
- virtual size_t AllocationSize(const mirror::Object* obj);
- virtual mirror::Object* Alloc(Thread* self, size_t num_bytes);
+ size_t AllocationSize(const mirror::Object* obj);
+ mirror::Object* Alloc(Thread* self, size_t num_bytes);
size_t Free(Thread* self, mirror::Object* ptr);
- virtual void Walk(DlMallocSpace::WalkCallback, void* arg);
+ void Walk(DlMallocSpace::WalkCallback, void* arg);
// TODO: disabling thread safety analysis as this may be called when we already hold lock_.
- virtual bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;
+ bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;
+
private:
LargeObjectMapSpace(const std::string& name);
virtual ~LargeObjectMapSpace() {}
@@ -114,6 +102,7 @@
};
// A continuous large object space with a free-list to handle holes.
+// TODO: this implementation is buggy.
class FreeListSpace : public LargeObjectSpace {
public:
virtual ~FreeListSpace();
@@ -140,7 +129,7 @@
return End() - Begin();
}
- virtual void Dump(std::ostream& os) const;
+ void Dump(std::ostream& os) const;
private:
static const size_t kAlignment = kPageSize;
@@ -197,6 +186,8 @@
FreeChunks free_chunks_ GUARDED_BY(lock_);
};
-}
+} // namespace space
+} // namespace gc
+} // namespace art
-#endif // ART_SRC_GC_LARGE_OBJECT_SPACE_H_
+#endif // ART_SRC_GC_SPACE_LARGE_OBJECT_SPACE_H_
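LargeObjectSpace keeps both current and lifetime allocation counters behind the renamed GetBytesAllocated/GetObjectsAllocated accessors. A minimal sketch of that bookkeeping follows; AllocationCounters is an illustrative stand-in, not the real space.

#include <cstdint>
#include <cstdio>

// Illustrative stand-in for the four counters kept by LargeObjectSpace:
// current bytes/objects plus lifetime totals that a free never decrements.
class AllocationCounters {
 public:
  void RecordAlloc(std::uint64_t bytes) {
    num_bytes_allocated_ += bytes;
    ++num_objects_allocated_;
    total_bytes_allocated_ += bytes;
    ++total_objects_allocated_;
  }

  void RecordFree(std::uint64_t bytes) {
    num_bytes_allocated_ -= bytes;
    --num_objects_allocated_;
  }

  std::uint64_t GetBytesAllocated() const { return num_bytes_allocated_; }
  std::uint64_t GetObjectsAllocated() const { return num_objects_allocated_; }
  std::uint64_t GetTotalBytesAllocated() const { return total_bytes_allocated_; }
  std::uint64_t GetTotalObjectsAllocated() const { return total_objects_allocated_; }

 private:
  std::uint64_t num_bytes_allocated_ = 0;
  std::uint64_t num_objects_allocated_ = 0;
  std::uint64_t total_bytes_allocated_ = 0;
  std::uint64_t total_objects_allocated_ = 0;
};

int main() {
  AllocationCounters counters;
  counters.RecordAlloc(4096);
  counters.RecordAlloc(8192);
  counters.RecordFree(4096);
  std::printf("live bytes: %llu, lifetime bytes: %llu\n",
              static_cast<unsigned long long>(counters.GetBytesAllocated()),       // 8192
              static_cast<unsigned long long>(counters.GetTotalBytesAllocated()));  // 12288
  return 0;
}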
diff --git a/src/gc/space/space-inl.h b/src/gc/space/space-inl.h
new file mode 100644
index 0000000..8216d1b
--- /dev/null
+++ b/src/gc/space/space-inl.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_SPACE_SPACE_INL_H_
+#define ART_SRC_GC_SPACE_SPACE_INL_H_
+
+#include "space.h"
+
+#include "dlmalloc_space.h"
+#include "image_space.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+inline ImageSpace* Space::AsImageSpace() {
+ DCHECK_EQ(GetType(), kSpaceTypeImageSpace);
+ return down_cast<ImageSpace*>(down_cast<MemMapSpace*>(this));
+}
+
+inline DlMallocSpace* Space::AsDlMallocSpace() {
+ DCHECK_EQ(GetType(), kSpaceTypeAllocSpace);
+ return down_cast<DlMallocSpace*>(down_cast<MemMapSpace*>(this));
+}
+
+inline DlMallocSpace* Space::AsZygoteSpace() {
+ DCHECK_EQ(GetType(), kSpaceTypeZygoteSpace);
+ return down_cast<DlMallocSpace*>(down_cast<MemMapSpace*>(this));
+}
+
+inline LargeObjectSpace* Space::AsLargeObjectSpace() {
+ DCHECK_EQ(GetType(), kSpaceTypeLargeObjectSpace);
+ return reinterpret_cast<LargeObjectSpace*>(this);
+}
+
+} // namespace space
+} // namespace gc
+} // namespace art
+
+#endif // ART_SRC_GC_SPACE_SPACE_INL_H_
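space-inl.h pairs each Is*() predicate with an As*() accessor that checks the space type before downcasting. A compact standalone sketch of the same pattern follows; BaseSpace and AllocLike are hypothetical stand-ins, and assert stands in for DCHECK_EQ.

#include <cassert>
#include <cstdio>
#include <memory>
#include <vector>

// Illustrative stand-ins for the Space hierarchy: a type tag plus paired
// IsFoo()/AsFoo() accessors, where AsFoo() checks the tag before casting.
enum SpaceKind { kKindImage, kKindAlloc };

class AllocLike;  // Forward declaration for the As* accessor.

class BaseSpace {
 public:
  explicit BaseSpace(SpaceKind kind) : kind_(kind) {}
  virtual ~BaseSpace() {}
  SpaceKind GetKind() const { return kind_; }
  bool IsAllocLike() const { return kind_ == kKindAlloc; }
  AllocLike* AsAllocLike();  // Defined below, once AllocLike is complete.
 private:
  const SpaceKind kind_;
};

class AllocLike : public BaseSpace {
 public:
  AllocLike() : BaseSpace(kKindAlloc) {}
  const char* Describe() const { return "alloc-like space"; }
};

AllocLike* BaseSpace::AsAllocLike() {
  assert(GetKind() == kKindAlloc);  // Debug-only check, like the DCHECK_EQ in space-inl.h.
  return static_cast<AllocLike*>(this);
}

int main() {
  std::vector<std::unique_ptr<BaseSpace> > spaces;
  spaces.emplace_back(new BaseSpace(kKindImage));
  spaces.emplace_back(new AllocLike());

  for (const auto& space : spaces) {
    if (space->IsAllocLike()) {                                 // Check the tag first...
      std::printf("%s\n", space->AsAllocLike()->Describe());    // ...then downcast.
    }
  }
  return 0;
}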
diff --git a/src/gc/space/space.cc b/src/gc/space/space.cc
new file mode 100644
index 0000000..eae281a
--- /dev/null
+++ b/src/gc/space/space.cc
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "space.h"
+
+#include "base/logging.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+Space::Space(const std::string& name, GcRetentionPolicy gc_retention_policy) :
+ name_(name), gc_retention_policy_(gc_retention_policy) { }
+
+void Space::Dump(std::ostream& os) const {
+ os << GetName() << ":" << GetGcRetentionPolicy();
+}
+
+std::ostream& operator<<(std::ostream& os, const Space& space) {
+ space.Dump(os);
+ return os;
+}
+
+DiscontinuousSpace::DiscontinuousSpace(const std::string& name,
+ GcRetentionPolicy gc_retention_policy) :
+ Space(name, gc_retention_policy),
+ live_objects_(new accounting::SpaceSetMap("large live objects")),
+ mark_objects_(new accounting::SpaceSetMap("large marked objects")) {
+}
+
+} // namespace space
+} // namespace gc
+} // namespace art
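operator<< above takes the Space base class and forwards to the virtual Dump, so each space subclass controls its own textual form. A small standalone sketch of the idiom follows, with Describable and NamedThing as hypothetical stand-ins.

#include <iostream>
#include <string>

// Illustrative stand-in for the Space::Dump / operator<< pairing: the stream
// operator takes the base class by reference and forwards to a virtual Dump,
// so derived types pick their own representation.
class Describable {
 public:
  virtual ~Describable() {}
  virtual void Dump(std::ostream& os) const { os << "describable"; }
};

class NamedThing : public Describable {
 public:
  explicit NamedThing(const std::string& name) : name_(name) {}
  void Dump(std::ostream& os) const { os << "named:" << name_; }
 private:
  std::string name_;
};

std::ostream& operator<<(std::ostream& os, const Describable& d) {
  d.Dump(os);
  return os;
}

int main() {
  NamedThing thing("zygote");
  std::cout << thing << std::endl;  // Prints "named:zygote" via the override.
  return 0;
}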
diff --git a/src/gc/space/space.h b/src/gc/space/space.h
new file mode 100644
index 0000000..ca01c55
--- /dev/null
+++ b/src/gc/space/space.h
@@ -0,0 +1,295 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_SPACE_SPACE_H_
+#define ART_SRC_GC_SPACE_SPACE_H_
+
+#include <string>
+
+#include "UniquePtr.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "gc/accounting/space_bitmap.h"
+#include "globals.h"
+#include "image.h"
+#include "mem_map.h"
+
+namespace art {
+namespace mirror {
+ class Object;
+} // namespace mirror
+
+namespace gc {
+
+namespace accounting {
+ class SpaceBitmap;
+} // namespace accounting
+
+class Heap;
+
+namespace space {
+
+class DlMallocSpace;
+class ImageSpace;
+class LargeObjectSpace;
+
+static const bool kDebugSpaces = kIsDebugBuild;
+
+// See Space::GetGcRetentionPolicy.
+enum GcRetentionPolicy {
+ // Objects are retained forever with this policy for a space.
+ kGcRetentionPolicyNeverCollect,
+ // Every GC cycle will attempt to collect objects in this space.
+ kGcRetentionPolicyAlwaysCollect,
+ // Objects will be considered for collection only in "full" GC cycles, i.e. faster partial
+ // collections won't scan these areas, such as the Zygote.
+ kGcRetentionPolicyFullCollect,
+};
+std::ostream& operator<<(std::ostream& os, const GcRetentionPolicy& policy);
+
+enum SpaceType {
+ kSpaceTypeImageSpace,
+ kSpaceTypeAllocSpace,
+ kSpaceTypeZygoteSpace,
+ kSpaceTypeLargeObjectSpace,
+};
+std::ostream& operator<<(std::ostream& os, const SpaceType& space_type);
+
+// A space contains memory allocated for managed objects.
+class Space {
+ public:
+ // Dump space. Also key method for C++ vtables.
+ virtual void Dump(std::ostream& os) const;
+
+ // Name of the space. May vary, for example before/after the Zygote fork.
+ const char* GetName() const {
+ return name_.c_str();
+ }
+
+ // The policy governing when objects associated with this space are collected.
+ GcRetentionPolicy GetGcRetentionPolicy() const {
+ return gc_retention_policy_;
+ }
+
+ // Does the space support allocation?
+ virtual bool CanAllocateInto() const {
+ return true;
+ }
+
+ // Is the given object contained within this space?
+ virtual bool Contains(const mirror::Object* obj) const = 0;
+
+ // The kind of space this is: image, alloc, zygote, or large object.
+ virtual SpaceType GetType() const = 0;
+
+ // Is this an image space, i.e. one backed by a memory-mapped image file?
+ bool IsImageSpace() const {
+ return GetType() == kSpaceTypeImageSpace;
+ }
+ ImageSpace* AsImageSpace();
+
+ // Is this a dlmalloc-backed allocation space?
+ bool IsDlMallocSpace() const {
+ SpaceType type = GetType();
+ return type == kSpaceTypeAllocSpace || type == kSpaceTypeZygoteSpace;
+ }
+ DlMallocSpace* AsDlMallocSpace();
+
+ // Is this the space allocated into by the Zygote and no longer in use?
+ bool IsZygoteSpace() const {
+ return GetType() == kSpaceTypeZygoteSpace;
+ }
+ DlMallocSpace* AsZygoteSpace();
+
+ // Does this space hold large objects and implement the large object space abstraction?
+ bool IsLargeObjectSpace() const {
+ return GetType() == kSpaceTypeLargeObjectSpace;
+ }
+ LargeObjectSpace* AsLargeObjectSpace();
+
+ virtual ~Space() {}
+
+ protected:
+ Space(const std::string& name, GcRetentionPolicy gc_retention_policy);
+
+ void SetGcRetentionPolicy(GcRetentionPolicy gc_retention_policy) {
+ gc_retention_policy_ = gc_retention_policy;
+ }
+
+ // Name of the space that may vary due to the Zygote fork.
+ std::string name_;
+
+ private:
+ // When should objects within this space be reclaimed? Not constant as we vary it in the case
+ // of Zygote forking.
+ GcRetentionPolicy gc_retention_policy_;
+
+ friend class art::gc::Heap;
+
+ DISALLOW_COPY_AND_ASSIGN(Space);
+};
+std::ostream& operator<<(std::ostream& os, const Space& space);
+
+// AllocSpace interface.
+class AllocSpace {
+ public:
+ // Number of bytes currently allocated.
+ virtual uint64_t GetBytesAllocated() const = 0;
+ // Number of objects currently allocated.
+ virtual uint64_t GetObjectsAllocated() const = 0;
+ // Number of bytes allocated since the space was created.
+ virtual uint64_t GetTotalBytesAllocated() const = 0;
+ // Number of objects allocated since the space was created.
+ virtual uint64_t GetTotalObjectsAllocated() const = 0;
+
+ // Allocate num_bytes without allowing growth.
+ virtual mirror::Object* Alloc(Thread* self, size_t num_bytes) = 0;
+
+ // Return the storage space required by obj.
+ virtual size_t AllocationSize(const mirror::Object* obj) = 0;
+
+ // Returns how many bytes were freed.
+ virtual size_t Free(Thread* self, mirror::Object* ptr) = 0;
+
+ // Returns how many bytes were freed.
+ virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) = 0;
+
+ protected:
+ AllocSpace() {}
+ virtual ~AllocSpace() {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AllocSpace);
+};
+
+// Continuous spaces have bitmaps and an address range. Although not required, objects within
+// continuous spaces can be marked in the card table.
+class ContinuousSpace : public Space {
+ public:
+ // Address at which the space begins
+ byte* Begin() const {
+ return begin_;
+ }
+
+ // Address at which the space ends, which may vary as the space is filled.
+ byte* End() const {
+ return end_;
+ }
+
+ // Current size of space
+ size_t Size() const {
+ return End() - Begin();
+ }
+
+ virtual accounting::SpaceBitmap* GetLiveBitmap() const = 0;
+ virtual accounting::SpaceBitmap* GetMarkBitmap() const = 0;
+
+ // Is object within this space? We check to see if the pointer is beyond the end first as
+ // continuous spaces are iterated over from low to high.
+ bool HasAddress(const mirror::Object* obj) const {
+ const byte* byte_ptr = reinterpret_cast<const byte*>(obj);
+ return byte_ptr < End() && byte_ptr >= Begin();
+ }
+
+ bool Contains(const mirror::Object* obj) const {
+ return HasAddress(obj);
+ }
+
+ virtual ~ContinuousSpace() {}
+
+ protected:
+ ContinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy,
+ byte* begin, byte* end) :
+ Space(name, gc_retention_policy), begin_(begin), end_(end) {
+ }
+
+ // The beginning of the storage for fast access.
+ byte* const begin_;
+
+ // Current end of the space.
+ byte* end_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ContinuousSpace);
+};
+
+// A space where objects may be allocated higgledy-piggledy throughout virtual memory. Currently
+// the card table can't cover these objects and so the write barrier shouldn't be triggered. This
+// is suitable for large primitive arrays.
+class DiscontinuousSpace : public Space {
+ public:
+ accounting::SpaceSetMap* GetLiveObjects() const {
+ return live_objects_.get();
+ }
+
+ accounting::SpaceSetMap* GetMarkObjects() const {
+ return mark_objects_.get();
+ }
+
+ virtual ~DiscontinuousSpace() {}
+
+ protected:
+ DiscontinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy);
+
+ UniquePtr<accounting::SpaceSetMap> live_objects_;
+ UniquePtr<accounting::SpaceSetMap> mark_objects_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(DiscontinuousSpace);
+};
+
+class MemMapSpace : public ContinuousSpace {
+ public:
+ // Maximum size to which the mapped space can grow.
+ virtual size_t Capacity() const {
+ return mem_map_->Size();
+ }
+
+ // Size of the space without a limit on its growth. By default this is just the Capacity, but
+ // for the allocation space we support starting with a small heap and then extending it.
+ virtual size_t NonGrowthLimitCapacity() const {
+ return Capacity();
+ }
+
+ protected:
+ MemMapSpace(const std::string& name, MemMap* mem_map, size_t initial_size,
+ GcRetentionPolicy gc_retention_policy)
+ : ContinuousSpace(name, gc_retention_policy,
+ mem_map->Begin(), mem_map->Begin() + initial_size),
+ mem_map_(mem_map) {
+ }
+
+ MemMap* GetMemMap() {
+ return mem_map_.get();
+ }
+
+ const MemMap* GetMemMap() const {
+ return mem_map_.get();
+ }
+
+ private:
+ // Underlying storage of the space
+ UniquePtr<MemMap> mem_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(MemMapSpace);
+};
+
+} // namespace space
+} // namespace gc
+} // namespace art
+
+#endif // ART_SRC_GC_SPACE_SPACE_H_
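The three retention policies defined in space.h determine which spaces a given collection visits. The following standalone sketch shows one plausible mapping from collector kind to the policies it collects; ShouldCollect and the enums are illustrative, not the heap's actual logic.

#include <cstdio>

// Illustrative re-statement of the retention policies in space.h.
enum RetentionPolicy {
  kNeverCollect,    // e.g. image spaces.
  kAlwaysCollect,   // e.g. the main alloc space.
  kFullCollect,     // e.g. the zygote space: only visited by full collections.
};

enum CollectorKind { kPartialCollection, kFullCollection };

// One plausible policy check: partial collections skip kFullCollect spaces,
// while full collections take everything except kNeverCollect.
static bool ShouldCollect(CollectorKind collector, RetentionPolicy policy) {
  if (policy == kNeverCollect) return false;
  if (policy == kAlwaysCollect) return true;
  return collector == kFullCollection;
}

int main() {
  const char* names[] = {"image", "alloc", "zygote"};
  const RetentionPolicy policies[] = {kNeverCollect, kAlwaysCollect, kFullCollect};
  for (int c = kPartialCollection; c <= kFullCollection; ++c) {
    for (int s = 0; s < 3; ++s) {
      std::printf("%s GC %s %s space\n",
                  c == kFullCollection ? "full" : "partial",
                  ShouldCollect(static_cast<CollectorKind>(c), policies[s]) ? "collects" : "skips",
                  names[s]);
    }
  }
  return 0;
}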
diff --git a/src/gc/space_test.cc b/src/gc/space/space_test.cc
similarity index 97%
rename from src/gc/space_test.cc
rename to src/gc/space/space_test.cc
index 372ec77..08ae894 100644
--- a/src/gc/space_test.cc
+++ b/src/gc/space/space_test.cc
@@ -14,22 +14,27 @@
* limitations under the License.
*/
-#include "space.h"
+#include "dlmalloc_space.h"
#include "common_test.h"
-#include "dlmalloc.h"
#include "globals.h"
#include "UniquePtr.h"
#include <stdint.h>
namespace art {
+namespace gc {
+namespace space {
class SpaceTest : public CommonTest {
public:
void SizeFootPrintGrowthLimitAndTrimBody(DlMallocSpace* space, intptr_t object_size,
int round, size_t growth_limit);
void SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size);
+
+ void AddContinuousSpace(ContinuousSpace* space) {
+ Runtime::Current()->GetHeap()->AddContinuousSpace(space);
+ }
};
TEST_F(SpaceTest, Init) {
@@ -79,7 +84,7 @@
ASSERT_TRUE(space != NULL);
// Make space findable to the heap, will also delete space when runtime is cleaned up
- Runtime::Current()->GetHeap()->AddSpace(space);
+ AddContinuousSpace(space);
Thread* self = Thread::Current();
// Succeeds, fits without adjusting the footprint limit.
@@ -121,7 +126,7 @@
space = space->CreateZygoteSpace();
// Make space findable to the heap, will also delete space when runtime is cleaned up
- Runtime::Current()->GetHeap()->AddSpace(space);
+ AddContinuousSpace(space);
// Succeeds, fits without adjusting the footprint limit.
ptr1 = space->Alloc(self, 1 * MB);
@@ -148,7 +153,7 @@
Thread* self = Thread::Current();
// Make space findable to the heap, will also delete space when runtime is cleaned up
- Runtime::Current()->GetHeap()->AddSpace(space);
+ AddContinuousSpace(space);
// Succeeds, fits without adjusting the footprint limit.
mirror::Object* ptr1 = space->Alloc(self, 1 * MB);
@@ -190,7 +195,7 @@
ASSERT_TRUE(space != NULL);
// Make space findable to the heap, will also delete space when runtime is cleaned up
- Runtime::Current()->GetHeap()->AddSpace(space);
+ AddContinuousSpace(space);
Thread* self = Thread::Current();
// Succeeds, fits without adjusting the max allowed footprint.
@@ -384,7 +389,7 @@
EXPECT_EQ(space->NonGrowthLimitCapacity(), capacity);
// Make space findable to the heap, will also delete space when runtime is cleaned up
- Runtime::Current()->GetHeap()->AddSpace(space);
+ AddContinuousSpace(space);
// In this round we don't allocate with growth and therefore can't grow past the initial size.
// This effectively makes the growth_limit the initial_size, so assert this.
@@ -419,4 +424,6 @@
TEST_SizeFootPrintGrowthLimitAndTrim(4MB, 4 * MB)
TEST_SizeFootPrintGrowthLimitAndTrim(8MB, 8 * MB)
+} // namespace space
+} // namespace gc
} // namespace art
diff --git a/src/gc/sticky_mark_sweep.cc b/src/gc/sticky_mark_sweep.cc
deleted file mode 100644
index 988d4e7..0000000
--- a/src/gc/sticky_mark_sweep.cc
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "heap.h"
-#include "large_object_space.h"
-#include "space.h"
-#include "sticky_mark_sweep.h"
-#include "thread.h"
-
-namespace art {
-
-StickyMarkSweep::StickyMarkSweep(Heap* heap, bool is_concurrent)
- : PartialMarkSweep(heap, is_concurrent) {
- cumulative_timings_.SetName(GetName());
-}
-
-StickyMarkSweep::~StickyMarkSweep() {
-
-}
-
-void StickyMarkSweep::BindBitmaps() {
- PartialMarkSweep::BindBitmaps();
-
- Spaces& spaces = GetHeap()->GetSpaces();
- WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
- // For sticky GC, we want to bind the bitmaps of both the zygote space and the alloc space.
- // This lets us start with the mark bitmap of the previous garbage collection as the current
- // mark bitmap of the alloc space. After the sticky GC finishes, we then unbind the bitmaps,
- // making it so that the live bitmap of the alloc space is contains the newly marked objects
- // from the sticky GC.
- for (Spaces::iterator it = spaces.begin(); it != spaces.end(); ++it) {
- if ((*it)->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect) {
- BindLiveToMarkBitmap(*it);
- }
- }
-
- GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();
-}
-
-void StickyMarkSweep::MarkReachableObjects() {
- DisableFinger();
- RecursiveMarkDirtyObjects(CardTable::kCardDirty - 1);
-}
-
-void StickyMarkSweep::Sweep(TimingLogger& timings, bool swap_bitmaps) {
- ObjectStack* live_stack = GetHeap()->GetLiveStack();
- SweepArray(timings_, live_stack, false);
- timings_.AddSplit("SweepArray");
-}
-
-} // namespace art