/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "monitor.h"
#include "mark_sweep-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/field.h"
#include "mirror/field-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::Class;
using ::art::mirror::Field;
using ::art::mirror::Object;
using ::art::mirror::ObjectArray;

namespace art {
namespace gc {
namespace collector {

// Performance options.
static const bool kParallelMarkStack = true;
static const bool kDisableFinger = true;  // TODO: Fix, bit rotten.
static const bool kUseMarkStackPrefetch = true;

// Profiling and information flags.
static const bool kCountClassesMarked = false;
static const bool kProfileLargeObjects = false;
static const bool kMeasureOverhead = false;
static const bool kCountTasks = false;
static const bool kCountJavaLangRefs = false;

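// An immune space is never collected during this GC: its live and mark bitmaps are bound
// together so that marked == live, and its address range is merged into
// [immune_begin_, immune_end_) so marking can reject pointers into it with one range check.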
void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    BindLiveToMarkBitmap(space);
  }

  // Add the space to the immune region.
  if (immune_begin_ == NULL) {
    DCHECK(immune_end_ == NULL);
    SetImmuneRange(reinterpret_cast<Object*>(space->Begin()),
                   reinterpret_cast<Object*>(space->End()));
  } else {
    const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
    const space::ContinuousSpace* prev_space = NULL;
    // Find out if the previous space is immune.
    // TODO: C++0x
    typedef std::vector<space::ContinuousSpace*>::const_iterator It;
    for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
      if (*it == space) {
        break;
      }
      prev_space = *it;
    }

    // If previous space was immune, then extend the immune region. Relies on continuous spaces
    // being sorted by Heap::AddContinuousSpace.
    if (prev_space != NULL &&
        immune_begin_ <= reinterpret_cast<Object*>(prev_space->Begin()) &&
        immune_end_ >= reinterpret_cast<Object*>(prev_space->End())) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
    }
  }
}

void MarkSweep::BindBitmaps() {
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);

  // Mark all of the spaces we never collect as immune.
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      ImmuneSpace(space);
    }
  }
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       (is_concurrent ? "concurrent mark sweep" : "mark sweep")),
      current_mark_bitmap_(NULL),
      java_lang_Class_(NULL),
      mark_stack_(NULL),
      immune_begin_(NULL),
      immune_end_(NULL),
      soft_reference_list_(NULL),
      weak_reference_list_(NULL),
      finalizer_reference_list_(NULL),
      phantom_reference_list_(NULL),
      cleared_reference_list_(NULL),
      gc_barrier_(new Barrier(0)),
      large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
      mark_stack_expand_lock_("mark sweep mark stack expand lock"),
      is_concurrent_(is_concurrent),
      clear_soft_references_(false) {
}

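// Runs before any marking: resets the timings and profiling counters, caches the heap's mark
// stack and java.lang.Class, selects the default mark bitmap, and performs any pre-GC heap
// verification.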
void MarkSweep::InitializePhase() {
  timings_.Reset();
  timings_.StartSplit("InitializePhase");
  mark_stack_ = GetHeap()->mark_stack_.get();
  DCHECK(mark_stack_ != NULL);
  SetImmuneRange(NULL, NULL);
  soft_reference_list_ = NULL;
  weak_reference_list_ = NULL;
  finalizer_reference_list_ = NULL;
  phantom_reference_list_ = NULL;
  cleared_reference_list_ = NULL;
  freed_bytes_ = 0;
  freed_objects_ = 0;
  class_count_ = 0;
  array_count_ = 0;
  other_count_ = 0;
  large_object_test_ = 0;
  large_object_mark_ = 0;
  classes_marked_ = 0;
  overhead_time_ = 0;
  work_chunks_created_ = 0;
  work_chunks_deleted_ = 0;
  reference_count_ = 0;
  java_lang_Class_ = Class::GetJavaLangClass();
  CHECK(java_lang_Class_ != NULL);
  FindDefaultMarkBitmap();
  // Do any pre GC verification.
  heap_->PreGcVerification(this);
}

void MarkSweep::ProcessReferences(Thread* self) {
  timings_.NewSplit("ProcessReferences");
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_,
                    &finalizer_reference_list_, &phantom_reference_list_);
}

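// The paused portion of a concurrent collection. With the mutator lock held exclusively, we
// re-mark the root set, re-scan objects on cards dirtied while mutators were running, process
// references, and un-mark objects allocated during this GC so a later sticky GC can still
// reclaim them.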
bool MarkSweep::HandleDirtyObjectsPhase() {
  Thread* self = Thread::Current();
  accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
  Locks::mutator_lock_->AssertExclusiveHeld(self);

  {
    timings_.NewSplit("ReMarkRoots");
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Re-mark root set.
    ReMarkRoots();

    // Scan dirty objects; this is only required if we are not doing concurrent GC.
    RecursiveMarkDirtyObjects(accounting::CardTable::kCardDirty);
  }

  ProcessReferences(self);

  // Only need to do this if we have the card mark verification on, and only during concurrent GC.
  if (GetHeap()->verify_missing_card_marks_) {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // This second sweep makes sure that we don't have any objects in the live stack which point
    // to freed objects. Such objects cause problems since their references may point at
    // previously freed objects.
    SweepArray(allocation_stack, false);
  } else {
    timings_.NewSplit("UnMarkAllocStack");
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // The allocation stack contains objects allocated since the start of the GC. These may have
    // been marked during this GC, meaning they won't be eligible for reclaiming in the next
    // sticky GC. Remove these objects from the mark bitmaps so that they will be eligible for
    // sticky collection.
    heap_->UnMarkAllocStack(GetHeap()->alloc_space_->GetMarkBitmap(),
                            GetHeap()->large_object_space_->GetMarkObjects(),
                            allocation_stack);
  }
  return true;
}

bool MarkSweep::IsConcurrent() const {
  return is_concurrent_;
}

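// Binds bitmaps, processes dirty cards into the mod-union tables, swaps the allocation and
// live stacks, and marks the root set. When the mutator lock is exclusively held all threads
// are suspended and roots are marked directly; otherwise thread roots are marked via a
// checkpoint while mutators keep running.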
void MarkSweep::MarkingPhase() {
  Heap* heap = GetHeap();
  Thread* self = Thread::Current();

  timings_.NewSplit("BindBitmaps");
  BindBitmaps();
  FindDefaultMarkBitmap();
  // Process dirty cards and add dirty cards to mod union tables.
  heap->ProcessCards(timings_);

  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  heap->SwapStacks();

  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    timings_.NewSplit("MarkRoots");
    MarkRoots();
  } else {
    timings_.NewSplit("MarkRootsCheckpoint");
    MarkRootsCheckpoint(self);
    timings_.NewSplit("MarkNonThreadRoots");
    MarkNonThreadRoots();
  }
  timings_.NewSplit("MarkConcurrentRoots");
  MarkConcurrentRoots();

  heap->UpdateAndMarkModUnion(this, timings_, GetGcType());
  MarkReachableObjects();
}

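// Transitively marks everything reachable from the already-marked roots. The live stack
// (objects allocated since the last GC) is marked as live first so that a concurrent sweep
// cannot reclaim new allocations.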
void MarkSweep::MarkReachableObjects() {
  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
  // knowing that new allocations won't be marked as live.
  timings_.NewSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStack(heap_->alloc_space_->GetLiveBitmap(),
                        heap_->large_object_space_->GetLiveObjects(),
                        live_stack);
  live_stack->Reset();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

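// Frees everything that remains unmarked. For non-concurrent collections, references are
// processed here rather than in HandleDirtyObjectsPhase. After sweeping, the live and mark
// bitmaps are swapped and any bound bitmaps are unbound.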
void MarkSweep::ReclaimPhase() {
  Thread* self = Thread::Current();

  if (!IsConcurrent()) {
    ProcessReferences(self);
  }

  // Before freeing anything, let's verify the heap.
  if (kIsDebugBuild) {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    VerifyImageRoots();
  }
  heap_->PreSweepingGcVerification(this);

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Reclaim unmarked objects.
    Sweep(false);

    // Swap the live and mark bitmaps for each space we modified. This is an optimization that
    // enables us to not clear live bits inside of the sweep. Only swaps unbound bitmaps.
    timings_.NewSplit("SwapBitmaps");
    SwapBitmaps();

    // Unbind the live and mark bitmaps.
    UnBindBitmaps();
  }
}

void MarkSweep::SetImmuneRange(Object* begin, Object* end) {
  immune_begin_ = begin;
  immune_end_ = end;
}

void MarkSweep::FindDefaultMarkBitmap() {
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_mark_bitmap_ = (*it)->GetMarkBitmap();
      CHECK(current_mark_bitmap_ != NULL);
      return;
    }
  }
  GetHeap()->DumpSpaces();
  LOG(FATAL) << "Could not find a default mark bitmap";
}

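// Doubles the capacity of the mark stack. Guarded by mark_stack_expand_lock_ so that when
// several marking threads overflow the stack at once, only the first one actually resizes it.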
void MarkSweep::ExpandMarkStack() {
  // Rare case, no need to have Thread::Current be a parameter.
  MutexLock mu(Thread::Current(), mark_stack_expand_lock_);
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp;
  temp.insert(temp.begin(), mark_stack_->Begin(), mark_stack_->End());
  mark_stack_->Resize(mark_stack_->Capacity() * 2);
  for (size_t i = 0; i < temp.size(); ++i) {
    mark_stack_->PushBack(temp[i]);
  }
}

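// Parallel variant of MarkObjectNonNull: marks the object with an atomic test-and-set of the
// mark bitmap and pushes it with the lock-free AtomicPushBack, expanding the mark stack and
// retrying whenever the push fails because the stack is full.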
inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj, bool check_finger) {
  DCHECK(obj != NULL);
  if (MarkObjectParallel(obj)) {
    while (UNLIKELY(!mark_stack_->AtomicPushBack(const_cast<Object*>(obj)))) {
      // Only reason a push can fail is that the mark stack is full.
      ExpandMarkStack();
    }
  }
}

inline void MarkSweep::MarkObjectNonNull(const Object* obj, bool check_finger) {
  DCHECK(obj != NULL);

  if (obj >= immune_begin_ && obj < immune_end_) {
    DCHECK(IsMarked(obj));
    return;
  }

  // Try to take advantage of locality of references within a space; failing that, find the
  // space the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj);
      return;
    }
  }

  // This object was not previously marked.
  if (!object_bitmap->Test(obj)) {
    object_bitmap->Set(obj);
    // Do we need to expand the mark stack?
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(const_cast<Object*>(obj));
  }
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool MarkSweep::MarkLargeObject(const Object* obj) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
  if (kProfileLargeObjects) {
    ++large_object_test_;
  }
  if (UNLIKELY(!large_objects->Test(obj))) {
    if (!large_object_space->Contains(obj)) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      VerifyRoots();
      LOG(FATAL) << "Can't mark bad root";
    }
    if (kProfileLargeObjects) {
      ++large_object_mark_;
    }
    large_objects->Set(obj);
    // Don't need to check finger since large objects never have any object references.
    return true;
  }
  return false;
}

inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != NULL);

  if (obj >= immune_begin_ && obj < immune_end_) {
    DCHECK(IsMarked(obj));
    return false;
  }

  // Try to take advantage of locality of references within a space; failing that, find the
  // space the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (new_bitmap != NULL) {
      object_bitmap = new_bitmap;
    } else {
      // TODO: Remove the Thread::Current here?
      // TODO: Convert this to some kind of atomic marking?
      MutexLock mu(Thread::Current(), large_object_lock_);
      return MarkLargeObject(obj);
    }
  }

  // Return true if the object was not previously marked.
  return !object_bitmap->AtomicTestAndSet(obj);
}

// Used to mark objects when recursing. Recursion is done by moving
// the finger across the bitmaps in address order and marking child
// objects. Any newly-marked objects whose addresses are lower than
// the finger won't be visited by the bitmap scan, so those objects
// need to be added to the mark stack.
void MarkSweep::MarkObject(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj, true);
  }
}

void MarkSweep::MarkRoot(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj, false);
  }
}

void MarkSweep::MarkRootParallelCallback(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNullParallel(root, false);
}

void MarkSweep::MarkObjectCallback(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNull(root, false);
}

void MarkSweep::ReMarkObjectVisitor(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNull(root, true);
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) {
  // See if the root is on any space bitmap.
  if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == NULL) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root;
      if (visitor != NULL) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

// Marks all objects in the root set.
void MarkSweep::MarkRoots() {
  Runtime::Current()->VisitNonConcurrentRoots(MarkObjectCallback, this);
}

void MarkSweep::MarkNonThreadRoots() {
  Runtime::Current()->VisitNonThreadRoots(MarkObjectCallback, this);
}

void MarkSweep::MarkConcurrentRoots() {
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(MarkObjectCallback, this, false, true);
}

class CheckObjectVisitor {
 public:
  explicit CheckObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  void operator()(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
    }
    mark_sweep_->CheckReference(obj, ref, offset, is_static);
  }

 private:
  MarkSweep* const mark_sweep_;
};

void MarkSweep::CheckObject(const Object* obj) {
  DCHECK(obj != NULL);
  CheckObjectVisitor visitor(this);
  VisitObjectReferences(obj, visitor);
}

void MarkSweep::VerifyImageRootVisitor(Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  DCHECK(mark_sweep->heap_->GetMarkBitmap()->Test(root));
  mark_sweep->CheckObject(root);
}

void MarkSweep::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
  CHECK(space->IsDlMallocSpace());
  space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = alloc_space->mark_bitmap_.release();
  GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
  alloc_space->temp_bitmap_.reset(mark_bitmap);
  alloc_space->mark_bitmap_.reset(live_bitmap);
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  // TODO: Fix when annotalysis works with visitors.
  void operator()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

void MarkSweep::ScanGrayObjects(byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  ScanObjectVisitor visitor(this);
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), space_end = spaces.end(); it != space_end; ++it) {
    space::ContinuousSpace* space = *it;
    switch (space->GetGcRetentionPolicy()) {
      case space::kGcRetentionPolicyNeverCollect:
        timings_.NewSplit("ScanGrayImageSpaceObjects");
        break;
      case space::kGcRetentionPolicyFullCollect:
        timings_.NewSplit("ScanGrayZygoteSpaceObjects");
        break;
      case space::kGcRetentionPolicyAlwaysCollect:
        timings_.NewSplit("ScanGrayAllocSpaceObjects");
        break;
    }
    byte* begin = space->Begin();
    byte* end = space->End();
    // Image spaces are handled properly since live == marked for them.
    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    card_table->Scan(mark_bitmap, begin, end, visitor, minimum_age);
  }
}

class CheckBitmapVisitor {
 public:
  explicit CheckBitmapVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  void operator()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
    }
    DCHECK(obj != NULL);
    mark_sweep_->CheckObject(obj);
  }

 private:
  MarkSweep* mark_sweep_;
};

void MarkSweep::VerifyImageRoots() {
  // VerifyImageRoots ensures that all the references inside the image space point to objects
  // which are either in the image space or marked objects in the alloc space.
  CheckBitmapVisitor visitor(this);
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    if ((*it)->IsImageSpace()) {
      space::ImageSpace* space = (*it)->AsImageSpace();
      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      DCHECK(live_bitmap != NULL);
      live_bitmap->VisitMarkedRange(begin, end, visitor);
    }
  }
}

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  timings_.NewSplit("RecursiveMark");
  // RecursiveMark will build the lists of known instances of the Reference classes.
  // See DelayReferenceReferent for details.
  CHECK(soft_reference_list_ == NULL);
  CHECK(weak_reference_list_ == NULL);
  CHECK(finalizer_reference_list_ == NULL);
  CHECK(phantom_reference_list_ == NULL);
  CHECK(cleared_reference_list_ == NULL);

  const bool partial = GetGcType() == kGcTypePartial;
  ScanObjectVisitor scan_visitor(this);
  if (!kDisableFinger) {
    const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
    // TODO: C++0x
    typedef std::vector<space::ContinuousSpace*>::const_iterator It;
    for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
      space::ContinuousSpace* space = *it;
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_mark_bitmap_ = space->GetMarkBitmap();
        if (current_mark_bitmap_ == NULL) {
          GetHeap()->DumpSpaces();
          LOG(FATAL) << "invalid bitmap";
        }
        // This function does not handle heap end increasing, so we must use the space end.
        uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
        uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
        current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
      }
    }
  }
  timings_.NewSplit("ProcessMarkStack");
  ProcessMarkStack();
}

bool MarkSweep::IsMarkedCallback(const Object* object, void* arg) {
  return
      reinterpret_cast<MarkSweep*>(arg)->IsMarked(object) ||
      !reinterpret_cast<MarkSweep*>(arg)->GetHeap()->GetLiveBitmap()->Test(object);
}

void MarkSweep::RecursiveMarkDirtyObjects(byte minimum_age) {
  ScanGrayObjects(minimum_age);
  timings_.NewSplit("ProcessMarkStack");
  ProcessMarkStack();
}

void MarkSweep::ReMarkRoots() {
  Runtime::Current()->VisitRoots(ReMarkObjectVisitor, this, true, true);
}

void MarkSweep::SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg) {
  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
  MutexLock mu(Thread::Current(), vm->weak_globals_lock);
  IndirectReferenceTable* table = &vm->weak_globals;
  typedef IndirectReferenceTable::iterator It;  // TODO: C++0x auto
  for (It it = table->begin(), end = table->end(); it != end; ++it) {
    const Object** entry = *it;
    if (!is_marked(*entry, arg)) {
      *entry = kClearedJniWeakGlobal;
    }
  }
}

struct ArrayMarkedCheck {
  accounting::ObjectStack* live_stack;
  MarkSweep* mark_sweep;
};

// Either marked or not live.
bool MarkSweep::IsMarkedArrayCallback(const Object* object, void* arg) {
  ArrayMarkedCheck* array_check = reinterpret_cast<ArrayMarkedCheck*>(arg);
  if (array_check->mark_sweep->IsMarked(object)) {
    return true;
  }
  accounting::ObjectStack* live_stack = array_check->live_stack;
  return std::find(live_stack->Begin(), live_stack->End(), object) == live_stack->End();
}

void MarkSweep::SweepSystemWeaksArray(accounting::ObjectStack* allocations) {
  Runtime* runtime = Runtime::Current();
  // The callbacks check !is_marked, where is_marked is the callback, but we want
  // !IsMarked && IsLive, so compute !(!IsMarked && IsLive), which is equal to
  // (IsMarked || !IsLive). Or, for swapped bitmaps, (IsLive || !IsMarked).

  ArrayMarkedCheck visitor;
  visitor.live_stack = allocations;
  visitor.mark_sweep = this;
  runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedArrayCallback, &visitor);
  runtime->GetMonitorList()->SweepMonitorList(IsMarkedArrayCallback, &visitor);
  SweepJniWeakGlobals(IsMarkedArrayCallback, &visitor);
}

void MarkSweep::SweepSystemWeaks() {
  Runtime* runtime = Runtime::Current();
  // The callbacks check !is_marked, where is_marked is the callback, but we want
  // !IsMarked && IsLive, so compute !(!IsMarked && IsLive), which is equal to
  // (IsMarked || !IsLive). Or, for swapped bitmaps, (IsLive || !IsMarked).
  runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedCallback, this);
  runtime->GetMonitorList()->SweepMonitorList(IsMarkedCallback, this);
  SweepJniWeakGlobals(IsMarkedCallback, this);
}

bool MarkSweep::VerifyIsLiveCallback(const Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return "marked".
  return true;
}

void MarkSweep::VerifyIsLive(const Object* obj) {
  Heap* heap = GetHeap();
  if (!heap->GetLiveBitmap()->Test(obj)) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->GetLiveObjects()->Test(obj)) {
      if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
          heap->allocation_stack_->End()) {
        // Object not found!
        heap->DumpSpaces();
        LOG(FATAL) << "Found dead object " << obj;
      }
    }
  }
}

void MarkSweep::VerifySystemWeaks() {
  Runtime* runtime = Runtime::Current();
  // Verify system weaks, uses a special IsMarked callback which always returns true.
  runtime->GetInternTable()->SweepInternTableWeaks(VerifyIsLiveCallback, this);
  runtime->GetMonitorList()->SweepMonitorList(VerifyIsLiveCallback, this);

  JavaVMExt* vm = runtime->GetJavaVM();
  MutexLock mu(Thread::Current(), vm->weak_globals_lock);
  IndirectReferenceTable* table = &vm->weak_globals;
  typedef IndirectReferenceTable::iterator It;  // TODO: C++0x auto
  for (It it = table->begin(), end = table->end(); it != end; ++it) {
    const Object** entry = *it;
    VerifyIsLive(*entry);
  }
}

struct SweepCallbackContext {
  MarkSweep* mark_sweep;
  space::AllocSpace* space;
  Thread* self;
};

class CheckpointMarkThreadRoots : public Closure {
 public:
  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* mark_sweep_;
};

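// Marks thread roots using a checkpoint: each thread visits its own roots when it reaches a
// safepoint, so the threads do not all need to be suspended at once. The calling thread
// releases the heap bitmap and mutator locks while it waits on the barrier for every
// checkpoint to complete.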
void MarkSweep::MarkRootsCheckpoint(Thread* self) {
  CheckpointMarkThreadRoots check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the checkpoint be run on all threads, returning a count of the threads that
  // must run through the barrier, including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // TODO: optimize to not release locks when there are no threads to wait for.
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun);
  CHECK_EQ(old_state, kWaitingPerformingGc);
  gc_barrier_->Increment(self, barrier_count);
  self->SetState(kWaitingPerformingGc);
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
}

void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  MarkSweep* mark_sweep = context->mark_sweep;
  Heap* heap = mark_sweep->GetHeap();
  space::AllocSpace* space = context->space;
  Thread* self = context->self;
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
  // Use a bulk free (which merges consecutive objects before freeing) or free per object?
  // Documentation suggests better free performance with merging, but this may be at the expense
  // of allocation.
  size_t freed_objects = num_ptrs;
  // AllocSpace::FreeList clears the value in ptrs, so perform after clearing the live bit.
  size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs);
  heap->RecordFree(freed_objects, freed_bytes);
  mark_sweep->freed_objects_.fetch_add(freed_objects);
  mark_sweep->freed_bytes_.fetch_add(freed_bytes);
}

void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
  Heap* heap = context->mark_sweep->GetHeap();
  // We don't free any actual memory to avoid dirtying the shared zygote pages.
  for (size_t i = 0; i < num_ptrs; ++i) {
    Object* obj = static_cast<Object*>(ptrs[i]);
    heap->GetLiveBitmap()->Clear(obj);
    heap->GetCardTable()->MarkCard(obj);
  }
}

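// Sweeps only the objects recorded in the given allocation stack instead of walking entire
// space bitmaps; only alloc space and large object space objects can appear on the stack.
// With swap_bitmaps set, the roles of the live and mark bitmaps are reversed.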
void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  size_t freed_bytes = 0;
  space::DlMallocSpace* space = heap_->GetAllocSpace();

  // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark
  // bitmap, resulting in occasional frees of Weaks which are still in use.
  timings_.NewSplit("SweepSystemWeaks");
  SweepSystemWeaksArray(allocations);

  timings_.NewSplit("Process allocation stack");
  // Newly allocated objects MUST be in the alloc space and those are the only objects which we
  // are going to free.
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(live_bitmap, mark_bitmap);
    std::swap(large_live_objects, large_mark_objects);
  }

  size_t freed_large_objects = 0;
  size_t count = allocations->Size();
  Object** objects = const_cast<Object**>(allocations->Begin());
  Object** out = objects;

  // Empty the allocation stack.
  Thread* self = Thread::Current();
  for (size_t i = 0; i < count; ++i) {
    Object* obj = objects[i];
    // There should only be objects in the AllocSpace/LargeObjectSpace in the allocation stack.
    if (LIKELY(mark_bitmap->HasAddress(obj))) {
      if (!mark_bitmap->Test(obj)) {
        // Don't bother un-marking since we clear the mark bitmap anyways.
        *(out++) = obj;
      }
    } else if (!large_mark_objects->Test(obj)) {
      ++freed_large_objects;
      freed_bytes += large_object_space->Free(self, obj);
    }
  }
  CHECK_EQ(count, allocations->Size());
  timings_.NewSplit("FreeList");

  size_t freed_objects = out - objects;
  freed_bytes += space->FreeList(self, freed_objects, objects);
  VLOG(heap) << "Freed " << freed_objects << "/" << count
             << " objects with size " << PrettySize(freed_bytes);
  heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes);
  freed_objects_.fetch_add(freed_objects);
  freed_bytes_.fetch_add(freed_bytes);

  timings_.NewSplit("ResetStack");
  allocations->Reset();
}

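// Walks the live and mark bitmaps of every continuous space being collected and frees each
// object that is live but unmarked. Zygote spaces get a special callback that only clears
// live bits and dirties cards, leaving the shared zygote pages untouched.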
void MarkSweep::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());

  // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark
  // bitmap, resulting in occasional frees of Weaks which are still in use.
  timings_.NewSplit("SweepSystemWeaks");
  SweepSystemWeaks();

  const bool partial = (GetGcType() == kGcTypePartial);
  SweepCallbackContext scc;
  scc.mark_sweep = this;
  scc.self = Thread::Current();
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    // We always sweep always-collect spaces.
    bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect);
    if (!partial && !sweep_space) {
      // We sweep full-collect spaces when the GC isn't partial (i.e. it's a full GC).
      sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
    }
    if (sweep_space) {
      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
      scc.space = space->AsDlMallocSpace();
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
      if (swap_bitmaps) {
        std::swap(live_bitmap, mark_bitmap);
      }
      if (!space->IsZygoteSpace()) {
        timings_.NewSplit("SweepAllocSpace");
        // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                                           &SweepCallback, reinterpret_cast<void*>(&scc));
      } else {
        timings_.NewSplit("SweepZygote");
        // Zygote sweep takes care of dirtying cards and clearing live bits, does not free actual
        // memory.
        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                                           &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
      }
    }
  }

  timings_.NewSplit("SweepLargeObjects");
  SweepLargeObjects(swap_bitmaps);
}

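// Large objects live in a discontinuous space tracked by object sets rather than address
// bitmaps, so sweeping them means walking the live set and freeing each entry that is absent
// from the mark set.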
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -0700951void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
952 // Sweep large objects
Ian Rogers1d54e732013-05-02 21:10:01 -0700953 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
954 accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
955 accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -0700956 if (swap_bitmaps) {
957 std::swap(large_live_objects, large_mark_objects);
958 }
Ian Rogers1d54e732013-05-02 21:10:01 -0700959 accounting::SpaceSetMap::Objects& live_objects = large_live_objects->GetObjects();
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -0700960 // O(n*log(n)) but hopefully there are not too many large objects.
961 size_t freed_objects = 0;
Mathieu Chartier2fde5332012-09-14 14:51:54 -0700962 size_t freed_bytes = 0;
Ian Rogers50b35e22012-10-04 10:09:15 -0700963 Thread* self = Thread::Current();
Ian Rogers1d54e732013-05-02 21:10:01 -0700964 // TODO: C++0x
965 typedef accounting::SpaceSetMap::Objects::iterator It;
966 for (It it = live_objects.begin(), end = live_objects.end(); it != end; ++it) {
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -0700967 if (!large_mark_objects->Test(*it)) {
Mathieu Chartier1c23e1e2012-10-12 14:14:11 -0700968 freed_bytes += large_object_space->Free(self, const_cast<Object*>(*it));
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -0700969 ++freed_objects;
970 }
971 }
Mathieu Chartier4b95e8f2013-07-15 16:32:50 -0700972 freed_objects_.fetch_add(freed_objects);
973 freed_bytes_.fetch_add(freed_bytes);
Mathieu Chartier2fde5332012-09-14 14:51:54 -0700974 GetHeap()->RecordFree(freed_objects, freed_bytes);
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -0700975}
976
void MarkSweep::CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) {
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsDlMallocSpace() && space->Contains(ref)) {
      DCHECK(IsMarked(obj));

      bool is_marked = IsMarked(ref);
      if (!is_marked) {
        LOG(INFO) << *space;
        LOG(WARNING) << (is_static ? "Static ref'" : "Instance ref'") << PrettyTypeOf(ref)
                     << "' (" << reinterpret_cast<const void*>(ref) << ") in '" << PrettyTypeOf(obj)
                     << "' (" << reinterpret_cast<const void*>(obj) << ") at offset "
                     << reinterpret_cast<void*>(offset.Int32Value()) << " wasn't marked";

        const Class* klass = is_static ? obj->AsClass() : obj->GetClass();
        DCHECK(klass != NULL);
        const ObjectArray<Field>* fields = is_static ? klass->GetSFields() : klass->GetIFields();
        DCHECK(fields != NULL);
        bool found = false;
        for (int32_t i = 0; i < fields->GetLength(); ++i) {
          const Field* cur = fields->Get(i);
          if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
            LOG(WARNING) << "Field referencing the alloc space was " << PrettyField(cur);
            found = true;
            break;
          }
        }
        if (!found) {
          LOG(WARNING) << "Could not find field in object alloc space with offset " << offset.Int32Value();
        }

        bool obj_marked = heap_->GetCardTable()->IsDirty(obj);
        if (!obj_marked) {
          LOG(WARNING) << "Object '" << PrettyTypeOf(obj) << "' "
                       << "(" << reinterpret_cast<const void*>(obj) << ") contains references to "
                       << "the alloc space, but wasn't card marked";
        }
      }
    }
    break;
  }
}

// Process the "referent" field in a java.lang.ref.Reference. If the
// referent has not yet been marked, put it on the appropriate list in
// the gcHeap for later processing.
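// For example (illustrative only): a java.lang.ref.WeakReference whose referent is still
// unmarked at this point, and which is not already on a pending list, is appended to
// weak_reference_list_; the referent itself is deliberately not marked here, since
// ProcessReferences() decides its fate later.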
void MarkSweep::DelayReferenceReferent(Object* obj) {
  DCHECK(obj != NULL);
  Class* klass = obj->GetClass();
  DCHECK(klass != NULL);
  DCHECK(klass->IsReferenceClass());
  Object* pending = obj->GetFieldObject<Object*>(heap_->GetReferencePendingNextOffset(), false);
  Object* referent = heap_->GetReferenceReferent(obj);
  if (kCountJavaLangRefs) {
    ++reference_count_;
  }
  if (pending == NULL && referent != NULL && !IsMarked(referent)) {
    Object** list = NULL;
    if (klass->IsSoftReferenceClass()) {
      list = &soft_reference_list_;
    } else if (klass->IsWeakReferenceClass()) {
      list = &weak_reference_list_;
    } else if (klass->IsFinalizerReferenceClass()) {
      list = &finalizer_reference_list_;
    } else if (klass->IsPhantomReferenceClass()) {
      list = &phantom_reference_list_;
    }
    DCHECK(list != NULL) << PrettyClass(klass) << " " << std::hex << klass->GetAccessFlags();
    // TODO: One lock per list?
    heap_->EnqueuePendingReference(obj, list);
  }
}

void MarkSweep::ScanRoot(const Object* obj) {
  ScanObject(obj);
}

class MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  // TODO: Fix this when annotalysis works with visitors.
  void operator()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */,
                  bool /* is_static */) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(ref);
  }

 private:
  MarkSweep* const mark_sweep_;
};
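// Note: annotalysis cannot see through the visitor indirection (hence the TODO above), so when
// kDebugLocking is set the operator() asserts the same lock requirements at runtime instead.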
1075
Carl Shapiro69759ea2011-07-21 18:13:35 -07001076// Scans an object reference. Determines the type of the reference
1077// and dispatches to a specialized scanning routine.
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001078void MarkSweep::ScanObject(const Object* obj) {
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001079 MarkObjectVisitor visitor(this);
1080 ScanObjectVisit(obj, visitor);
1081}
1082
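// ScanObjectVisit (presumably defined in mark_sweep-inl.h) is a templated dispatcher: the
// serial path above passes MarkObjectVisitor, while the parallel tasks below pass
// MarkObjectParallelVisitor, so the per-field walking code is shared and the visitor calls can
// be inlined rather than made virtual.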
class MarkStackChunk : public Task {
 public:
  MarkStackChunk(ThreadPool* thread_pool, MarkSweep* mark_sweep, Object** begin, Object** end)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        index_(0),
        length_(0),
        output_(NULL) {
    length_ = end - begin;
    if (begin != end) {
      // Cost not significant since we only do this for the initial set of mark stack chunks.
      memcpy(data_, begin, length_ * sizeof(*begin));
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  ~MarkStackChunk() {
    DCHECK(output_ == NULL || output_->length_ == 0);
    DCHECK_GE(index_, length_);
    delete output_;
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  static const size_t max_size = 1 * KB;
  // Index of which object we are scanning. Only needs to be atomic if we are doing work stealing.
  size_t index_;
  // Input / output mark stack. We add newly marked references to data_ until length reaches
  // max_size. This is an optimization so that fewer tasks are created.
  // TODO: Investigate using a bounded buffer FIFO.
  Object* data_[max_size];
  // How many elements in data_ we need to scan.
  size_t length_;
  // Output block: newly marked references get added to the output block so that another thread
  // can scan them.
  MarkStackChunk* output_;

  class MarkObjectParallelVisitor {
   public:
    explicit MarkObjectParallelVisitor(MarkStackChunk* chunk_task) : chunk_task_(chunk_task) {}

    void operator()(const Object* /* obj */, const Object* ref,
                    const MemberOffset& /* offset */, bool /* is_static */) const {
      if (ref != NULL && chunk_task_->mark_sweep_->MarkObjectParallel(ref)) {
        chunk_task_->MarkStackPush(ref);
      }
    }

   private:
    MarkStackChunk* const chunk_task_;
  };

  // Push an object into the block.
  // No need for an atomic increment, since only one thread writes to an output block at any
  // given time.
  void Push(Object* obj) {
    CHECK(obj != NULL);
    data_[length_++] = obj;
  }

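  // Illustrative flow for the function below: pushes first fill the spare slots of data_; once
  // data_ holds max_size entries, pushes spill into output_, and when output_ itself fills up
  // it is enqueued as a fresh task and replaced, so no chunk ever holds more than max_size
  // objects.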
  void MarkStackPush(const Object* obj) {
    if (static_cast<size_t>(length_) < max_size) {
      Push(const_cast<Object*>(obj));
    } else {
      // Internal (thread-local) buffer is full, push to a new buffer instead.
      if (UNLIKELY(output_ == NULL)) {
        AllocateOutputChunk();
      } else if (UNLIKELY(static_cast<size_t>(output_->length_) == max_size)) {
        // Output block is full, queue it up for processing and obtain a new block.
        EnqueueOutput();
        AllocateOutputChunk();
      }
      output_->Push(const_cast<Object*>(obj));
    }
  }

  void ScanObject(Object* obj) {
    mark_sweep_->ScanObjectVisit(obj, MarkObjectParallelVisitor(this));
  }

  void EnqueueOutput() {
    if (output_ != NULL) {
      uint64_t start = 0;
      if (kMeasureOverhead) {
        start = NanoTime();
      }
      thread_pool_->AddTask(Thread::Current(), output_);
      output_ = NULL;
      if (kMeasureOverhead) {
        mark_sweep_->overhead_time_.fetch_add(NanoTime() - start);
      }
    }
  }

  void AllocateOutputChunk() {
    uint64_t start = 0;
    if (kMeasureOverhead) {
      start = NanoTime();
    }
    output_ = new MarkStackChunk(thread_pool_, mark_sweep_, NULL, NULL);
    if (kMeasureOverhead) {
      mark_sweep_->overhead_time_.fetch_add(NanoTime() - start);
    }
  }

  void Finalize() {
    EnqueueOutput();
    delete this;
  }

  // Scans all of the objects in the chunk.
  virtual void Run(Thread* self) {
    size_t index;
    while ((index = index_++) < length_) {
      if (kUseMarkStackPrefetch) {
        static const size_t prefetch_look_ahead = 1;
        __builtin_prefetch(data_[std::min(index + prefetch_look_ahead, length_ - 1)]);
      }
      Object* obj = data_[index];
      DCHECK(obj != NULL);
      ScanObject(obj);
    }
  }
};

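// A sketch of the parallel flow (assuming the pool has at least one worker): the mark stack is
// split into MarkStackChunk tasks; each worker scans its chunk, buffering newly marked
// references into the chunk's output block, and full output blocks are fed back to the pool as
// new tasks until everything drains and work_chunks_deleted_ catches up with
// work_chunks_created_.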
void MarkSweep::ProcessMarkStackParallel() {
  CHECK(kDisableFinger) << "parallel mark stack processing cannot work when finger is enabled";
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  // Split the current mark stack up into work tasks.
  const size_t num_threads = thread_pool->GetThreadCount();
  const size_t stack_size = mark_stack_->Size();
  const size_t chunk_size =
      std::min((stack_size + num_threads - 1) / num_threads,
               static_cast<size_t>(MarkStackChunk::max_size));
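  // (stack_size + num_threads - 1) / num_threads is a ceiling division; with hypothetical
  // numbers, a stack of 1000 objects and 4 workers gives chunk_size = min(250, 1024) = 250
  // objects per task, and the loop below still creates at least num_threads tasks.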
  size_t index = 0;
  for (size_t i = 0; i < num_threads || index < stack_size; ++i) {
    Object** begin = &mark_stack_->Begin()[std::min(stack_size, index)];
    Object** end = &mark_stack_->Begin()[std::min(stack_size, index + chunk_size)];
    index += chunk_size;
    thread_pool->AddTask(self, new MarkStackChunk(thread_pool, this, begin, end));
  }
  thread_pool->StartWorkers(self);
  thread_pool->Wait(self, true, true);
  mark_stack_->Reset();
  // LOG(INFO) << "Idle wait time " << PrettyDuration(thread_pool->GetWaitTime());
  CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
}

// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack() {
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  if (kParallelMarkStack && thread_pool != NULL && thread_pool->GetThreadCount() > 0) {
    ProcessMarkStackParallel();
    return;
  }

  if (kUseMarkStackPrefetch) {
    const size_t fifo_size = 4;
    const size_t fifo_mask = fifo_size - 1;
    const Object* fifo[fifo_size];
    for (size_t i = 0; i < fifo_size; ++i) {
      fifo[i] = NULL;
    }
    size_t fifo_pos = 0;
    size_t fifo_count = 0;
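    // The loop below is a software pipeline: an object popped off the mark stack is prefetched
    // now and scanned fifo_size iterations later, so the cache miss on its header overlaps
    // with the scanning of previously prefetched objects.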
    for (;;) {
      const Object* obj = fifo[fifo_pos & fifo_mask];
      if (obj != NULL) {
        ScanObject(obj);
        fifo[fifo_pos & fifo_mask] = NULL;
        --fifo_count;
      }

      if (!mark_stack_->IsEmpty()) {
        const Object* obj = mark_stack_->PopBack();
        DCHECK(obj != NULL);
        fifo[fifo_pos & fifo_mask] = obj;
        __builtin_prefetch(obj);
        fifo_count++;
      }
      fifo_pos++;

      if (!fifo_count) {
        CHECK(mark_stack_->IsEmpty()) << mark_stack_->Size();
        break;
      }
    }
  } else {
    while (!mark_stack_->IsEmpty()) {
      const Object* obj = mark_stack_->PopBack();
      DCHECK(obj != NULL);
      ScanObject(obj);
    }
  }
}

// Walks the reference list marking any references subject to the
// reference clearing policy. References with a black (marked) referent
// are removed from the list. References with white (unmarked) referents
// biased toward saving are blackened and also removed from the list.
void MarkSweep::PreserveSomeSoftReferences(Object** list) {
  DCHECK(list != NULL);
  Object* clear = NULL;
  size_t counter = 0;

  DCHECK(mark_stack_->IsEmpty());

  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent == NULL) {
      // Referent was cleared by the user during marking.
      continue;
    }
    bool is_marked = IsMarked(referent);
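    // The parity test below implements the "biased toward saving" policy: every other white
    // referent encountered (the 1st, 3rd, 5th, ...) is marked and kept alive, so roughly half
    // of the softly reachable objects survive this collection.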
    if (!is_marked && ((++counter) & 1)) {
      // Referent is white and biased toward saving, mark it.
      MarkObject(referent);
      is_marked = true;
    }
    if (!is_marked) {
      // Referent is white, queue it for clearing.
      heap_->EnqueuePendingReference(ref, &clear);
    }
  }
  *list = clear;
  // Restart the mark with the newly black references added to the
  // root set.
  ProcessMarkStack();
}

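// Fast paths, as the fields used below suggest: [immune_begin_, immune_end_) brackets spaces
// this collection never reclaims (such as the image space), so membership alone counts as
// marked, and current_mark_bitmap_ caches one space's mark bitmap so the common case can skip
// the heap-wide bitmap lookup.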
inline bool MarkSweep::IsMarked(const Object* object) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  if (object >= immune_begin_ && object < immune_end_) {
    return true;
  }
  DCHECK(current_mark_bitmap_ != NULL);
  if (current_mark_bitmap_->HasAddress(object)) {
    return current_mark_bitmap_->Test(object);
  }
  return heap_->GetMarkBitmap()->Test(object);
}

// Unlink the reference list, clearing reference objects with white
// referents. Cleared references registered to a reference queue are
// scheduled for appending by the heap worker thread.
void MarkSweep::ClearWhiteReferences(Object** list) {
  DCHECK(list != NULL);
  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent != NULL && !IsMarked(referent)) {
      // Referent is white, clear it.
      heap_->ClearReferenceReferent(ref);
      if (heap_->IsEnqueuable(ref)) {
        heap_->EnqueueReference(ref, &cleared_reference_list_);
      }
    }
  }
  DCHECK(*list == NULL);
}

// Enqueues finalizer references with white referents. White
// referents are blackened, moved to the zombie field, and the
// referent field is cleared.
void MarkSweep::EnqueueFinalizerReferences(Object** list) {
  DCHECK(list != NULL);
  MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset();
  bool has_enqueued = false;
  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent != NULL && !IsMarked(referent)) {
      MarkObject(referent);
      // If the referent is non-null the reference must be enqueuable.
      DCHECK(heap_->IsEnqueuable(ref));
      ref->SetFieldObject(zombie_offset, referent, false);
      heap_->ClearReferenceReferent(ref);
      heap_->EnqueueReference(ref, &cleared_reference_list_);
      has_enqueued = true;
    }
  }
  if (has_enqueued) {
    ProcessMarkStack();
  }
  DCHECK(*list == NULL);
}
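// The zombie field keeps the referent strongly reachable for the finalizer thread even though
// the referent field itself has been cleared; the Java-side FinalizerReference code reads the
// zombie when it later runs the finalizer (a libcore detail, noted here only for context).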

// Process reference class instances and schedule finalizations.
void MarkSweep::ProcessReferences(Object** soft_references, bool clear_soft,
                                  Object** weak_references,
                                  Object** finalizer_references,
                                  Object** phantom_references) {
  DCHECK(soft_references != NULL);
  DCHECK(weak_references != NULL);
  DCHECK(finalizer_references != NULL);
  DCHECK(phantom_references != NULL);

  // Unless we are in the zygote or required to clear soft references
  // with white referents, preserve some white referents.
  if (!clear_soft && !Runtime::Current()->IsZygote()) {
    PreserveSomeSoftReferences(soft_references);
  }

  // Clear all remaining soft and weak references with white
  // referents.
  ClearWhiteReferences(soft_references);
  ClearWhiteReferences(weak_references);

  // Preserve all white objects with finalize methods and schedule
  // them for finalization.
  EnqueueFinalizerReferences(finalizer_references);

  // Clear all f-reachable soft and weak references with white
  // referents.
  ClearWhiteReferences(soft_references);
  ClearWhiteReferences(weak_references);

  // Clear all phantom references with white referents.
  ClearWhiteReferences(phantom_references);

  // At this point all reference lists should be empty.
  DCHECK(*soft_references == NULL);
  DCHECK(*weak_references == NULL);
  DCHECK(*finalizer_references == NULL);
  DCHECK(*phantom_references == NULL);
}

void MarkSweep::UnBindBitmaps() {
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsDlMallocSpace()) {
      space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
      if (alloc_space->temp_bitmap_.get() != NULL) {
        // At this point, the temp_bitmap holds our old mark bitmap.
        accounting::SpaceBitmap* new_bitmap = alloc_space->temp_bitmap_.release();
        GetHeap()->GetMarkBitmap()->ReplaceBitmap(alloc_space->mark_bitmap_.get(), new_bitmap);
        CHECK_EQ(alloc_space->mark_bitmap_.release(), alloc_space->live_bitmap_.get());
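        // (The CHECK_EQ above relies on the earlier bind: while bound, mark_bitmap_ aliased
        // the live bitmap, so releasing it must yield live_bitmap_'s pointer; the real mark
        // bitmap is then reinstalled below.)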
        alloc_space->mark_bitmap_.reset(new_bitmap);
        DCHECK(alloc_space->temp_bitmap_.get() == NULL);
      }
    }
  }
}

void MarkSweep::FinishPhase() {
  // Can't enqueue references if we hold the mutator lock.
  Object* cleared_references = GetClearedReferences();
  Heap* heap = GetHeap();
  heap->EnqueueClearedReferences(&cleared_references);

  heap->PostGcVerification(this);

  timings_.NewSplit("GrowForUtilization");
  heap->GrowForUtilization(GetGcType(), GetDurationNs());

  timings_.NewSplit("RequestHeapTrim");
  heap->RequestHeapTrim();

  // Update the cumulative statistics.
  total_time_ns_ += GetDurationNs();
  // Use a uint64_t initial value so std::accumulate sums in 64 bits instead of truncating the
  // nanosecond pause times to int.
  total_paused_time_ns_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(),
                                           static_cast<uint64_t>(0), std::plus<uint64_t>());
  total_freed_objects_ += GetFreedObjects();
  total_freed_bytes_ += GetFreedBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  if (kCountScannedTypes) {
    VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
             << " other=" << other_count_;
  }

  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
  }

  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
  }

  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
  }

  if (kCountClassesMarked) {
    VLOG(gc) << "Classes marked " << classes_marked_;
  }

  if (kCountJavaLangRefs) {
    VLOG(gc) << "References scanned " << reference_count_;
  }

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      space->GetMarkBitmap()->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();
}

}  // namespace collector
}  // namespace gc
}  // namespace art