/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

// <algorithm> added for std::find, std::min, std::max, and std::swap used below.
#include <algorithm>
#include <climits>
#include <functional>
#include <numeric>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "mark_sweep-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/field.h"
#include "mirror/field-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "monitor.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

namespace art {
namespace gc {
namespace collector {

// Pull in the mirror types we use instead of a blanket `using namespace art::mirror`.
using ::art::mirror::Class;
using ::art::mirror::Field;
using ::art::mirror::Object;
using ::art::mirror::ObjectArray;

// Performance options.
static const bool kParallelMarkStack = true;
static const bool kDisableFinger = true;  // TODO: Fix, bit rotten.
static const bool kUseMarkStackPrefetch = true;

// Profiling and information flags.
static const bool kCountClassesMarked = false;
static const bool kProfileLargeObjects = false;
static const bool kMeasureOverhead = false;
static const bool kCountTasks = false;
static const bool kCountJavaLangRefs = false;

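// The mark "finger" tracks how far the linear bitmap scan has progressed. Objects marked at
// addresses below the finger have already been passed by the scan, so they are pushed on the
// mark stack to be revisited; objects above it will still be reached by the bitmap walk. With
// kDisableFinger set, every newly marked object is pushed unconditionally.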
class SetFingerVisitor {
 public:
  SetFingerVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  void operator ()(void* finger) const {
    mark_sweep_->SetFinger(reinterpret_cast<Object*>(finger));
  }

 private:
  MarkSweep* const mark_sweep_;
};

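// Spaces that this collection never collects (e.g. image spaces) are folded into a single
// contiguous immune region [immune_begin_, immune_end_): objects in that range are assumed
// marked and are neither tested nor pushed on the mark stack.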
void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    BindLiveToMarkBitmap(space);
  }

  // Add the space to the immune region.
  if (immune_begin_ == NULL) {
    DCHECK(immune_end_ == NULL);
    SetImmuneRange(reinterpret_cast<Object*>(space->Begin()),
                   reinterpret_cast<Object*>(space->End()));
  } else {
    const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
    const space::ContinuousSpace* prev_space = NULL;
    // Find out if the previous space is immune.
    // TODO: C++0x
    typedef std::vector<space::ContinuousSpace*>::const_iterator It;
    for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
      if (*it == space) {
        break;
      }
      prev_space = *it;
    }

    // If previous space was immune, then extend the immune region. Relies on continuous spaces
    // being sorted by Heap::AddContinuousSpace.
    if (prev_space != NULL &&
        immune_begin_ <= reinterpret_cast<Object*>(prev_space->Begin()) &&
        immune_end_ >= reinterpret_cast<Object*>(prev_space->End())) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
    }
  }
}

void MarkSweep::BindBitmaps() {
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);

  // Mark all of the spaces we never collect as immune.
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      ImmuneSpace(space);
    }
  }
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       (is_concurrent ? "concurrent mark sweep" : "mark sweep")),
      current_mark_bitmap_(NULL),
      java_lang_Class_(NULL),
      mark_stack_(NULL),
      finger_(NULL),
      immune_begin_(NULL),
      immune_end_(NULL),
      soft_reference_list_(NULL),
      weak_reference_list_(NULL),
      finalizer_reference_list_(NULL),
      phantom_reference_list_(NULL),
      cleared_reference_list_(NULL),
      gc_barrier_(new Barrier(0)),
      large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
      mark_stack_expand_lock_("mark sweep mark stack expand lock"),
      is_concurrent_(is_concurrent),
      clear_soft_references_(false) {
}

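// The collector runs as a sequence of phases. A rough sketch of the sequencing (the actual
// orchestration, including when mutators are paused, lives in the GarbageCollector driver):
//
//   InitializePhase();          // Reset per-GC state and counters.
//   MarkingPhase();             // Mark roots and everything reachable from them.
//   HandleDirtyObjectsPhase();  // Paused: re-mark roots and objects dirtied during marking.
//   ReclaimPhase();             // Sweep unmarked objects and swap/unbind bitmaps.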
void MarkSweep::InitializePhase() {
  timings_.Reset();
  timings_.StartSplit("InitializePhase");
  mark_stack_ = GetHeap()->mark_stack_.get();
  DCHECK(mark_stack_ != NULL);
  finger_ = NULL;
  SetImmuneRange(NULL, NULL);
  soft_reference_list_ = NULL;
  weak_reference_list_ = NULL;
  finalizer_reference_list_ = NULL;
  phantom_reference_list_ = NULL;
  cleared_reference_list_ = NULL;
  freed_bytes_ = 0;
  freed_objects_ = 0;
  class_count_ = 0;
  array_count_ = 0;
  other_count_ = 0;
  large_object_test_ = 0;
  large_object_mark_ = 0;
  classes_marked_ = 0;
  overhead_time_ = 0;
  work_chunks_created_ = 0;
  work_chunks_deleted_ = 0;
  reference_count_ = 0;
  java_lang_Class_ = Class::GetJavaLangClass();
  CHECK(java_lang_Class_ != NULL);
  FindDefaultMarkBitmap();
  // Do any pre GC verification.
  heap_->PreGcVerification(this);
}

void MarkSweep::ProcessReferences(Thread* self) {
  timings_.NewSplit("ProcessReferences");
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_,
                    &finalizer_reference_list_, &phantom_reference_list_);
}

bool MarkSweep::HandleDirtyObjectsPhase() {
  Thread* self = Thread::Current();
  accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
  Locks::mutator_lock_->AssertExclusiveHeld(self);

  {
    timings_.NewSplit("ReMarkRoots");
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Re-mark root set.
    ReMarkRoots();

    // Scan dirty objects; this is only required if we are not doing concurrent GC.
    RecursiveMarkDirtyObjects(accounting::CardTable::kCardDirty);
  }

  ProcessReferences(self);

  // Only need to do this if we have the card mark verification on, and only during concurrent GC.
  if (GetHeap()->verify_missing_card_marks_) {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // This second sweep makes sure that we don't have any objects in the live stack which point to
    // freed objects. These cause problems since their references may be previously freed objects.
    SweepArray(allocation_stack, false);
  } else {
    timings_.NewSplit("UnMarkAllocStack");
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // The allocation stack contains things allocated since the start of the GC. These may have
    // been marked during this GC, meaning they won't be eligible for reclaiming in the next
    // sticky GC. Remove these objects from the mark bitmaps so that they will be eligible for
    // sticky collection.
    heap_->UnMarkAllocStack(GetHeap()->alloc_space_->GetMarkBitmap(),
                            GetHeap()->large_object_space_->GetMarkObjects(),
                            allocation_stack);
  }
  return true;
}

bool MarkSweep::IsConcurrent() const {
  return is_concurrent_;
}

void MarkSweep::MarkingPhase() {
  Heap* heap = GetHeap();
  Thread* self = Thread::Current();

  timings_.NewSplit("BindBitmaps");
  BindBitmaps();
  FindDefaultMarkBitmap();
  // Process dirty cards and add dirty cards to mod union tables.
  heap->ProcessCards(timings_);

  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  heap->SwapStacks();

  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    timings_.NewSplit("MarkRoots");
    MarkRoots();
  } else {
    timings_.NewSplit("MarkRootsCheckpoint");
    MarkRootsCheckpoint(self);
    timings_.NewSplit("MarkNonThreadRoots");
    MarkNonThreadRoots();
  }
  timings_.NewSplit("MarkConcurrentRoots");
  MarkConcurrentRoots();

  heap->UpdateAndMarkModUnion(this, timings_, GetGcType());
  MarkReachableObjects();
}

void MarkSweep::MarkReachableObjects() {
  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
  // knowing that new allocations won't be marked as live.
  timings_.NewSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStack(heap_->alloc_space_->GetLiveBitmap(),
                        heap_->large_object_space_->GetLiveObjects(),
                        live_stack);
  live_stack->Reset();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
  DisableFinger();
}

void MarkSweep::ReclaimPhase() {
  Thread* self = Thread::Current();

  if (!IsConcurrent()) {
    ProcessReferences(self);
  }

  // Before freeing anything, let's verify the heap.
  if (kIsDebugBuild) {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    VerifyImageRoots();
  }
  heap_->PreSweepingGcVerification(this);

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Reclaim unmarked objects.
    Sweep(false);

    // Swap the live and mark bitmaps for each space we modified. This is an optimization that
    // enables us to not clear live bits inside of the sweep. Only swaps unbound bitmaps.
    timings_.NewSplit("SwapBitmaps");
    SwapBitmaps();

    // Unbind the live and mark bitmaps.
    UnBindBitmaps();
  }
}

void MarkSweep::SetImmuneRange(Object* begin, Object* end) {
  immune_begin_ = begin;
  immune_end_ = end;
}

void MarkSweep::FindDefaultMarkBitmap() {
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_mark_bitmap_ = space->GetMarkBitmap();
      CHECK(current_mark_bitmap_ != NULL);
      return;
    }
  }
  GetHeap()->DumpSpaces();
  LOG(FATAL) << "Could not find a default mark bitmap";
}

void MarkSweep::ExpandMarkStack() {
  // Rare case, no need to have Thread::Current be a parameter.
  MutexLock mu(Thread::Current(), mark_stack_expand_lock_);
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp;
  temp.insert(temp.begin(), mark_stack_->Begin(), mark_stack_->End());
  mark_stack_->Resize(mark_stack_->Capacity() * 2);
  for (size_t i = 0; i < temp.size(); ++i) {
    mark_stack_->PushBack(temp[i]);
  }
}

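// Parallel variant of MarkObjectNonNull: the mark bit is set with an atomic test-and-set and
// pushes go through AtomicPushBack, so multiple workers may mark concurrently. Growing the mark
// stack is still serialized by mark_stack_expand_lock_ in ExpandMarkStack.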
inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj, bool check_finger) {
  DCHECK(obj != NULL);
  if (MarkObjectParallel(obj)) {
    if (kDisableFinger || (check_finger && obj < finger_)) {
      while (UNLIKELY(!mark_stack_->AtomicPushBack(const_cast<Object*>(obj)))) {
        // Only reason a push can fail is that the mark stack is full.
        ExpandMarkStack();
      }
    }
  }
}

inline void MarkSweep::MarkObjectNonNull(const Object* obj, bool check_finger) {
  DCHECK(obj != NULL);

  if (obj >= immune_begin_ && obj < immune_end_) {
    DCHECK(IsMarked(obj));
    return;
  }

  // Try to take advantage of locality of references within a space; failing this, find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj);
      return;
    }
  }

  // This object was not previously marked.
  if (!object_bitmap->Test(obj)) {
    object_bitmap->Set(obj);
    if (kDisableFinger || (check_finger && obj < finger_)) {
      // Do we need to expand the mark stack?
      if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
        ExpandMarkStack();
      }
      // The object must be pushed on to the mark stack.
      mark_stack_->PushBack(const_cast<Object*>(obj));
    }
  }
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool MarkSweep::MarkLargeObject(const Object* obj) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
  if (kProfileLargeObjects) {
    ++large_object_test_;
  }
  if (UNLIKELY(!large_objects->Test(obj))) {
    if (!large_object_space->Contains(obj)) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      VerifyRoots();
      LOG(FATAL) << "Can't mark bad root";
    }
    if (kProfileLargeObjects) {
      ++large_object_mark_;
    }
    large_objects->Set(obj);
    // Don't need to check finger since large objects never have any object references.
    return true;
  }
  return false;
}

inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != NULL);

  if (obj >= immune_begin_ && obj < immune_end_) {
    DCHECK(IsMarked(obj));
    return false;
  }

  // Try to take advantage of locality of references within a space; failing this, find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (new_bitmap != NULL) {
      object_bitmap = new_bitmap;
    } else {
      // TODO: Remove the Thread::Current here?
      // TODO: Convert this to some kind of atomic marking?
      MutexLock mu(Thread::Current(), large_object_lock_);
      return MarkLargeObject(obj);
    }
  }

  // Return true if the object was not previously marked.
  return !object_bitmap->AtomicTestAndSet(obj);
}

// Used to mark objects when recursing. Recursion is done by moving
// the finger across the bitmaps in address order and marking child
// objects. Any newly-marked objects whose addresses are lower than
// the finger won't be visited by the bitmap scan, so those objects
// need to be added to the mark stack.
void MarkSweep::MarkObject(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj, true);
  }
}

void MarkSweep::MarkRoot(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj, false);
  }
}

void MarkSweep::MarkRootParallelCallback(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNullParallel(root, false);
}

void MarkSweep::MarkObjectCallback(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNull(root, false);
}

void MarkSweep::ReMarkObjectVisitor(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNull(root, true);
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) {
  // See if the root is on any space bitmap.
  if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == NULL) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root;
      if (visitor != NULL) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

// Marks all objects in the root set.
void MarkSweep::MarkRoots() {
  Runtime::Current()->VisitNonConcurrentRoots(MarkObjectCallback, this);
}

void MarkSweep::MarkNonThreadRoots() {
  Runtime::Current()->VisitNonThreadRoots(MarkObjectCallback, this);
}

void MarkSweep::MarkConcurrentRoots() {
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(MarkObjectCallback, this, false, true);
}

class CheckObjectVisitor {
 public:
  CheckObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  void operator ()(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
    }
    mark_sweep_->CheckReference(obj, ref, offset, is_static);
  }

 private:
  MarkSweep* const mark_sweep_;
};

void MarkSweep::CheckObject(const Object* obj) {
  DCHECK(obj != NULL);
  CheckObjectVisitor visitor(this);
  VisitObjectReferences(obj, visitor);
}

void MarkSweep::VerifyImageRootVisitor(Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  DCHECK(mark_sweep->heap_->GetMarkBitmap()->Test(root));
  mark_sweep->CheckObject(root);
}

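// For spaces we are not collecting this cycle, marking is made trivial by aliasing the mark
// bitmap to the live bitmap, so everything currently live counts as marked. The real mark bitmap
// is stashed in temp_bitmap_ until the bitmaps are unbound at the end of the collection.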
void MarkSweep::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
  CHECK(space->IsDlMallocSpace());
  space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = alloc_space->mark_bitmap_.release();
  GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
  alloc_space->temp_bitmap_.reset(mark_bitmap);
  alloc_space->mark_bitmap_.reset(live_bitmap);
}

class ScanObjectVisitor {
 public:
  ScanObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  // TODO: Fix me when annotalysis works with visitors.
  void operator ()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

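// A "gray" object here is one whose card was dirtied by a mutator write since the cards were
// last cleared or aged. Re-scanning objects on such cards picks up any references stored after
// the object was first scanned, which is what keeps concurrent and sticky marking correct.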
void MarkSweep::ScanGrayObjects(byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  ScanObjectVisitor visitor(this);
  SetFingerVisitor finger_visitor(this);
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), space_end = spaces.end(); it != space_end; ++it) {
    space::ContinuousSpace* space = *it;
    switch (space->GetGcRetentionPolicy()) {
      case space::kGcRetentionPolicyNeverCollect:
        timings_.NewSplit("ScanGrayImageSpaceObjects");
        break;
      case space::kGcRetentionPolicyFullCollect:
        timings_.NewSplit("ScanGrayZygoteSpaceObjects");
        break;
      case space::kGcRetentionPolicyAlwaysCollect:
        timings_.NewSplit("ScanGrayAllocSpaceObjects");
        break;
    }
    byte* begin = space->Begin();
    byte* end = space->End();
    // Image spaces are handled properly since live == marked for them.
    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    card_table->Scan(mark_bitmap, begin, end, visitor, finger_visitor, minimum_age);
  }
}

class CheckBitmapVisitor {
 public:
  CheckBitmapVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  void operator ()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
    }
    DCHECK(obj != NULL);
    mark_sweep_->CheckObject(obj);
  }

 private:
  MarkSweep* mark_sweep_;
};

void MarkSweep::VerifyImageRoots() {
  // Verify roots ensures that all the references inside the image space point to objects which
  // are either in the image space or marked objects in the alloc space.
  CheckBitmapVisitor visitor(this);
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    if ((*it)->IsImageSpace()) {
      space::ImageSpace* space = (*it)->AsImageSpace();
      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      DCHECK(live_bitmap != NULL);
      live_bitmap->VisitMarkedRange(begin, end, visitor, VoidFunctor());
    }
  }
}

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  timings_.NewSplit("RecursiveMark");
  // RecursiveMark will build the lists of known instances of the Reference classes.
  // See DelayReferenceReferent for details.
  CHECK(soft_reference_list_ == NULL);
  CHECK(weak_reference_list_ == NULL);
  CHECK(finalizer_reference_list_ == NULL);
  CHECK(phantom_reference_list_ == NULL);
  CHECK(cleared_reference_list_ == NULL);

  const bool partial = GetGcType() == kGcTypePartial;
  SetFingerVisitor set_finger_visitor(this);
  ScanObjectVisitor scan_visitor(this);
  if (!kDisableFinger) {
    finger_ = NULL;
    const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
    // TODO: C++0x
    typedef std::vector<space::ContinuousSpace*>::const_iterator It;
    for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
      space::ContinuousSpace* space = *it;
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_mark_bitmap_ = space->GetMarkBitmap();
        if (current_mark_bitmap_ == NULL) {
          GetHeap()->DumpSpaces();
          LOG(FATAL) << "invalid bitmap";
        }
        // This function does not handle heap end increasing, so we must use the space end.
        uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
        uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
        current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor, set_finger_visitor);
      }
    }
  }
  DisableFinger();
  timings_.NewSplit("ProcessMarkStack");
  ProcessMarkStack();
}

bool MarkSweep::IsMarkedCallback(const Object* object, void* arg) {
  return
      reinterpret_cast<MarkSweep*>(arg)->IsMarked(object) ||
      !reinterpret_cast<MarkSweep*>(arg)->GetHeap()->GetLiveBitmap()->Test(object);
}

void MarkSweep::RecursiveMarkDirtyObjects(byte minimum_age) {
  ScanGrayObjects(minimum_age);
  timings_.NewSplit("ProcessMarkStack");
  ProcessMarkStack();
}

void MarkSweep::ReMarkRoots() {
  Runtime::Current()->VisitRoots(ReMarkObjectVisitor, this, true, true);
}

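// System weaks (interned strings, monitors, JNI weak globals) are not traced through during
// marking; instead, after marking, each table entry whose referent was not marked is cleared so
// that it does not keep the referent alive.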
void MarkSweep::SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg) {
  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
  MutexLock mu(Thread::Current(), vm->weak_globals_lock);
  IndirectReferenceTable* table = &vm->weak_globals;
  typedef IndirectReferenceTable::iterator It;  // TODO: C++0x auto
  for (It it = table->begin(), end = table->end(); it != end; ++it) {
    const Object** entry = *it;
    if (!is_marked(*entry, arg)) {
      *entry = kClearedJniWeakGlobal;
    }
  }
}

struct ArrayMarkedCheck {
  accounting::ObjectStack* live_stack;
  MarkSweep* mark_sweep;
};

// Either marked or not live.
bool MarkSweep::IsMarkedArrayCallback(const Object* object, void* arg) {
  ArrayMarkedCheck* array_check = reinterpret_cast<ArrayMarkedCheck*>(arg);
  if (array_check->mark_sweep->IsMarked(object)) {
    return true;
  }
  accounting::ObjectStack* live_stack = array_check->live_stack;
  return std::find(live_stack->Begin(), live_stack->End(), object) == live_stack->End();
}

void MarkSweep::SweepSystemWeaksArray(accounting::ObjectStack* allocations) {
  Runtime* runtime = Runtime::Current();
  // The callbacks check !is_marked, where is_marked is the callback, but we want
  // !IsMarked && IsLive. So compute !(!IsMarked && IsLive), which is equal to
  // (IsMarked || !IsLive). Or, for swapped bitmaps, (IsLive || !IsMarked).
  ArrayMarkedCheck visitor;
  visitor.live_stack = allocations;
  visitor.mark_sweep = this;
  runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedArrayCallback, &visitor);
  runtime->GetMonitorList()->SweepMonitorList(IsMarkedArrayCallback, &visitor);
  SweepJniWeakGlobals(IsMarkedArrayCallback, &visitor);
}

void MarkSweep::SweepSystemWeaks() {
  Runtime* runtime = Runtime::Current();
  // The callbacks check !is_marked, where is_marked is the callback, but we want
  // !IsMarked && IsLive. So compute !(!IsMarked && IsLive), which is equal to
  // (IsMarked || !IsLive). Or, for swapped bitmaps, (IsLive || !IsMarked).
  runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedCallback, this);
  runtime->GetMonitorList()->SweepMonitorList(IsMarkedCallback, this);
  SweepJniWeakGlobals(IsMarkedCallback, this);
}

bool MarkSweep::VerifyIsLiveCallback(const Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return "marked".
  return true;
}

void MarkSweep::VerifyIsLive(const Object* obj) {
  Heap* heap = GetHeap();
  if (!heap->GetLiveBitmap()->Test(obj)) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->GetLiveObjects()->Test(obj)) {
      if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
          heap->allocation_stack_->End()) {
        // Object not found!
        heap->DumpSpaces();
        LOG(FATAL) << "Found dead object " << obj;
      }
    }
  }
}

void MarkSweep::VerifySystemWeaks() {
  Runtime* runtime = Runtime::Current();
  // Verify system weaks; uses a special IsMarked callback which always returns true.
  runtime->GetInternTable()->SweepInternTableWeaks(VerifyIsLiveCallback, this);
  runtime->GetMonitorList()->SweepMonitorList(VerifyIsLiveCallback, this);

  JavaVMExt* vm = runtime->GetJavaVM();
  MutexLock mu(Thread::Current(), vm->weak_globals_lock);
  IndirectReferenceTable* table = &vm->weak_globals;
  typedef IndirectReferenceTable::iterator It;  // TODO: C++0x auto
  for (It it = table->begin(), end = table->end(); it != end; ++it) {
    const Object** entry = *it;
    VerifyIsLive(*entry);
  }
}

struct SweepCallbackContext {
  MarkSweep* mark_sweep;
  space::AllocSpace* space;
  Thread* self;
};

class CheckpointMarkThreadRoots : public Closure {
 public:
  CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* mark_sweep_;
};

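// Rather than suspending every thread to scan its stack, each running thread marks its own roots
// at a checkpoint and then passes the barrier; the GC releases its locks and waits on the
// barrier until all threads counted by RunCheckpoint have run.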
void MarkSweep::MarkRootsCheckpoint(Thread* self) {
  CheckpointMarkThreadRoots check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the checkpoint be run on all threads, returning a count of the threads that
  // must run through the barrier, including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // TODO: optimize to not release locks when there are no threads to wait for.
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun);
  CHECK_EQ(old_state, kWaitingPerformingGc);
  gc_barrier_->Increment(self, barrier_count);
  self->SetState(kWaitingPerformingGc);
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
}

void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  MarkSweep* mark_sweep = context->mark_sweep;
  Heap* heap = mark_sweep->GetHeap();
  space::AllocSpace* space = context->space;
  Thread* self = context->self;
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
  // Use a bulk free that merges consecutive objects before freeing, or free per object?
  // Documentation suggests better free performance with merging, but this may be at the expense
  // of allocation.
  size_t freed_objects = num_ptrs;
  // AllocSpace::FreeList clears the value in ptrs, so perform after clearing the live bit.
  size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs);
  heap->RecordFree(freed_objects, freed_bytes);
  mark_sweep->freed_objects_ += freed_objects;
  mark_sweep->freed_bytes_ += freed_bytes;
}

void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
  Heap* heap = context->mark_sweep->GetHeap();
  // We don't free any actual memory to avoid dirtying the shared zygote pages.
  for (size_t i = 0; i < num_ptrs; ++i) {
    Object* obj = static_cast<Object*>(ptrs[i]);
    heap->GetLiveBitmap()->Clear(obj);
    heap->GetCardTable()->MarkCard(obj);
  }
}

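// Sweep variant driven by the allocation stack instead of a full bitmap walk: only objects
// allocated since the last GC are candidates here, so anything in the stack that is not in the
// (possibly swapped) mark bitmap or large object mark set can be freed directly.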
void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  size_t freed_bytes = 0;
  space::DlMallocSpace* space = heap_->GetAllocSpace();

  // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark
  // bitmap, resulting in occasional frees of Weaks which are still in use.
  timings_.NewSplit("SweepSystemWeaks");
  SweepSystemWeaksArray(allocations);

  timings_.NewSplit("Process allocation stack");
  // Newly allocated objects MUST be in the alloc space and those are the only objects which we are
  // going to free.
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(live_bitmap, mark_bitmap);
    std::swap(large_live_objects, large_mark_objects);
  }

  size_t freed_large_objects = 0;
  size_t count = allocations->Size();
  Object** objects = const_cast<Object**>(allocations->Begin());
  Object** out = objects;

  // Empty the allocation stack.
  Thread* self = Thread::Current();
  for (size_t i = 0; i < count; ++i) {
    Object* obj = objects[i];
    // There should only be objects in the AllocSpace/LargeObjectSpace in the allocation stack.
    if (LIKELY(mark_bitmap->HasAddress(obj))) {
      if (!mark_bitmap->Test(obj)) {
        // Don't bother un-marking since we clear the mark bitmap anyways.
        *(out++) = obj;
      }
    } else if (!large_mark_objects->Test(obj)) {
      ++freed_large_objects;
      freed_bytes += large_object_space->Free(self, obj);
    }
  }
  CHECK_EQ(count, allocations->Size());
  timings_.NewSplit("FreeList");

  size_t freed_objects = out - objects;
  freed_bytes += space->FreeList(self, freed_objects, objects);
  VLOG(heap) << "Freed " << freed_objects << "/" << count
             << " objects with size " << PrettySize(freed_bytes);
  heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes);
  freed_objects_ += freed_objects;
  freed_bytes_ += freed_bytes;

  timings_.NewSplit("ResetStack");
  allocations->Reset();
}

void MarkSweep::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());

  // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark
  // bitmap, resulting in occasional frees of Weaks which are still in use.
  timings_.NewSplit("SweepSystemWeaks");
  SweepSystemWeaks();

  const bool partial = (GetGcType() == kGcTypePartial);
  SweepCallbackContext scc;
  scc.mark_sweep = this;
  scc.self = Thread::Current();
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    // We always sweep always-collect spaces.
    bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect);
    if (!partial && !sweep_space) {
      // We sweep full-collect spaces when the GC isn't partial (i.e. it is full).
      sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
    }
    if (sweep_space) {
      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
      scc.space = space->AsDlMallocSpace();
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
      if (swap_bitmaps) {
        std::swap(live_bitmap, mark_bitmap);
      }
      if (!space->IsZygoteSpace()) {
        timings_.NewSplit("SweepAllocSpace");
        // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                                           &SweepCallback, reinterpret_cast<void*>(&scc));
      } else {
        timings_.NewSplit("SweepZygote");
        // Zygote sweep takes care of dirtying cards and clearing live bits; it does not free
        // actual memory.
        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                                           &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
      }
    }
  }

  timings_.NewSplit("SweepLargeObjects");
  SweepLargeObjects(swap_bitmaps);
}

void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  // Sweep large objects.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  accounting::SpaceSetMap::Objects& live_objects = large_live_objects->GetObjects();
  // O(n*log(n)) but hopefully there are not too many large objects.
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  Thread* self = Thread::Current();
  // TODO: C++0x
  typedef accounting::SpaceSetMap::Objects::iterator It;
  for (It it = live_objects.begin(), end = live_objects.end(); it != end; ++it) {
    if (!large_mark_objects->Test(*it)) {
      freed_bytes += large_object_space->Free(self, const_cast<Object*>(*it));
      ++freed_objects;
    }
  }
  freed_objects_ += freed_objects;
  freed_bytes_ += freed_bytes;
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

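// Debugging helper: checks that a reference from obj into the alloc space is either marked or
// covered by a dirty card on the holder, and logs the offending field when neither holds.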
Mathieu Chartierb43b7d42012-06-19 13:15:09 -07001005void MarkSweep::CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001006 const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
1007 // TODO: C++0x
1008 typedef std::vector<space::ContinuousSpace*>::const_iterator It;
1009 for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
1010 space::ContinuousSpace* space = *it;
1011 if (space->IsDlMallocSpace() && space->Contains(ref)) {
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001012 DCHECK(IsMarked(obj));
Mathieu Chartierb43b7d42012-06-19 13:15:09 -07001013
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001014 bool is_marked = IsMarked(ref);
1015 if (!is_marked) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001016 LOG(INFO) << *space;
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001017 LOG(WARNING) << (is_static ? "Static ref'" : "Instance ref'") << PrettyTypeOf(ref)
1018 << "' (" << reinterpret_cast<const void*>(ref) << ") in '" << PrettyTypeOf(obj)
1019 << "' (" << reinterpret_cast<const void*>(obj) << ") at offset "
1020 << reinterpret_cast<void*>(offset.Int32Value()) << " wasn't marked";
Mathieu Chartier262e5ff2012-06-01 17:35:38 -07001021
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001022 const Class* klass = is_static ? obj->AsClass() : obj->GetClass();
1023 DCHECK(klass != NULL);
1024 const ObjectArray<Field>* fields = is_static ? klass->GetSFields() : klass->GetIFields();
1025 DCHECK(fields != NULL);
1026 bool found = false;
1027 for (int32_t i = 0; i < fields->GetLength(); ++i) {
1028 const Field* cur = fields->Get(i);
1029 if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
1030 LOG(WARNING) << "Field referencing the alloc space was " << PrettyField(cur);
1031 found = true;
1032 break;
1033 }
1034 }
1035 if (!found) {
1036 LOG(WARNING) << "Could not find field in object alloc space with offset " << offset.Int32Value();
1037 }
Mathieu Chartier262e5ff2012-06-01 17:35:38 -07001038
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001039 bool obj_marked = heap_->GetCardTable()->IsDirty(obj);
1040 if (!obj_marked) {
1041 LOG(WARNING) << "Object '" << PrettyTypeOf(obj) << "' "
1042 << "(" << reinterpret_cast<const void*>(obj) << ") contains references to "
1043 << "the alloc space, but wasn't card marked";
Mathieu Chartier262e5ff2012-06-01 17:35:38 -07001044 }
1045 }
Ian Rogers5d76c432011-10-31 21:42:49 -07001046 }
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001047 break;
Ian Rogers5d76c432011-10-31 21:42:49 -07001048 }
1049}
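// A minimal sketch of the field-offset search in the diagnostic above,
// assuming a simplified field record (SketchField and FindFieldByOffsetSketch
// are hypothetical). Returning -1 corresponds to the "Could not find field"
// warning path.
struct SketchField {
  int32_t offset;  // Byte offset of the field within the object.
};
static int32_t FindFieldByOffsetSketch(const SketchField* fields, int32_t count, int32_t offset) {
  for (int32_t i = 0; i < count; ++i) {
    if (fields[i].offset == offset) {
      return i;  // Index of the field that holds the suspect reference.
    }
  }
  return -1;  // No field at that offset; the reference came from elsewhere.
}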
1050
Carl Shapiro69759ea2011-07-21 18:13:35 -07001051// Process the "referent" field in a java.lang.ref.Reference. If the
1052// referent has not yet been marked, put it on the appropriate list in
1053// the heap for later processing.
1054void MarkSweep::DelayReferenceReferent(Object* obj) {
1055 DCHECK(obj != NULL);
Brian Carlstrom1f870082011-08-23 16:02:11 -07001056 Class* klass = obj->GetClass();
1057 DCHECK(klass != NULL);
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001058 DCHECK(klass->IsReferenceClass());
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001059 Object* pending = obj->GetFieldObject<Object*>(heap_->GetReferencePendingNextOffset(), false);
1060 Object* referent = heap_->GetReferenceReferent(obj);
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001061 if (kCountJavaLangRefs) {
1062 ++reference_count_;
1063 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07001064 if (pending == NULL && referent != NULL && !IsMarked(referent)) {
Brian Carlstrom4873d462011-08-21 15:23:39 -07001065 Object** list = NULL;
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001066 if (klass->IsSoftReferenceClass()) {
Carl Shapiro69759ea2011-07-21 18:13:35 -07001067 list = &soft_reference_list_;
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001068 } else if (klass->IsWeakReferenceClass()) {
Carl Shapiro69759ea2011-07-21 18:13:35 -07001069 list = &weak_reference_list_;
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001070 } else if (klass->IsFinalizerReferenceClass()) {
Carl Shapiro69759ea2011-07-21 18:13:35 -07001071 list = &finalizer_reference_list_;
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001072 } else if (klass->IsPhantomReferenceClass()) {
Carl Shapiro69759ea2011-07-21 18:13:35 -07001073 list = &phantom_reference_list_;
1074 }
Brian Carlstrom0796af02011-10-12 14:31:45 -07001075 DCHECK(list != NULL) << PrettyClass(klass) << " " << std::hex << klass->GetAccessFlags();
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001076 // TODO: One lock per list?
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001077 heap_->EnqueuePendingReference(obj, list);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001078 }
1079}
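// Toy model of the pending-reference list used above, assuming an intrusive
// circular singly linked list through a 'pending_next' field (SketchRef and
// EnqueuePendingSketch are hypothetical). A circular list is what lets
// 'pending == NULL' above mean "not yet enqueued": even the tail of a
// non-empty list has a non-null pending_next.
struct SketchRef {
  SketchRef* pending_next;
};
static void EnqueuePendingSketch(SketchRef* ref, SketchRef** list) {
  if (*list == NULL) {
    ref->pending_next = ref;  // A singleton list points at itself.
    *list = ref;
  } else {
    // Splice after the current head of the circular list.
    ref->pending_next = (*list)->pending_next;
    (*list)->pending_next = ref;
  }
}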
1080
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001081void MarkSweep::ScanRoot(const Object* obj) {
1082 ScanObject(obj);
1083}
1084
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001085class MarkObjectVisitor {
1086 public:
1087 MarkObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {
1088 }
1089
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001090  // TODO: Fix this when annotalysis works with visitors.
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001091 void operator ()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */,
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001092 bool /* is_static */) const
1093 NO_THREAD_SAFETY_ANALYSIS {
1094 if (kDebugLocking) {
1095 Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
1096 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
1097 }
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001098 mark_sweep_->MarkObject(ref);
1099 }
1100
1101 private:
1102 MarkSweep* const mark_sweep_;
1103};
1104
Carl Shapiro69759ea2011-07-21 18:13:35 -07001105// Scans an object reference. Determines the type of the reference
1106// and dispatches to a specialized scanning routine.
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001107void MarkSweep::ScanObject(const Object* obj) {
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001108 MarkObjectVisitor visitor(this);
1109 ScanObjectVisit(obj, visitor);
1110}
1111
1112class MarkStackChunk : public Task {
Ian Rogers1d54e732013-05-02 21:10:01 -07001113 public:
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001114 MarkStackChunk(ThreadPool* thread_pool, MarkSweep* mark_sweep, Object** begin, Object** end)
1115 : mark_sweep_(mark_sweep),
1116 thread_pool_(thread_pool),
1117 index_(0),
1118 length_(0),
1119 output_(NULL) {
1120 length_ = end - begin;
1121 if (begin != end) {
1122 // Cost not significant since we only do this for the initial set of mark stack chunks.
1123 memcpy(data_, begin, length_ * sizeof(*begin));
1124 }
1125 if (kCountTasks) {
1126 ++mark_sweep_->work_chunks_created_;
1127 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001128 }
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001129
1130 ~MarkStackChunk() {
1131 DCHECK(output_ == NULL || output_->length_ == 0);
1132 DCHECK_GE(index_, length_);
1133 delete output_;
1134 if (kCountTasks) {
1135 ++mark_sweep_->work_chunks_deleted_;
1136 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07001137 }
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001138
1139 MarkSweep* const mark_sweep_;
1140 ThreadPool* const thread_pool_;
1141 static const size_t max_size = 1 * KB;
1142 // Index of which object we are scanning. Only needs to be atomic if we are doing work stealing.
1143 size_t index_;
1144 // Input / output mark stack. We add newly marked references to data_ until length reaches
1145  // max_size. This is an optimization so that fewer tasks are created.
1146 // TODO: Investigate using a bounded buffer FIFO.
1147 Object* data_[max_size];
1148 // How many elements in data_ we need to scan.
1149 size_t length_;
1150  // Output block; newly marked references get added to the output block so that another thread can
1151 // scan them.
1152 MarkStackChunk* output_;
1153
1154 class MarkObjectParallelVisitor {
1155 public:
1156 MarkObjectParallelVisitor(MarkStackChunk* chunk_task) : chunk_task_(chunk_task) {
1157
1158 }
1159
1160 void operator ()(const Object* /* obj */, const Object* ref,
1161 const MemberOffset& /* offset */, bool /* is_static */) const {
1162 if (ref != NULL && chunk_task_->mark_sweep_->MarkObjectParallel(ref)) {
1163 chunk_task_->MarkStackPush(ref);
1164 }
1165 }
1166
1167 private:
1168 MarkStackChunk* const chunk_task_;
1169 };
1170
1171 // Push an object into the block.
1172 // Don't need to use atomic ++ since we only one thread is writing to an output block at any
1173 // given time.
1174 void Push(Object* obj) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001175 CHECK(obj != NULL);
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001176 data_[length_++] = obj;
1177 }
1178
1179 void MarkStackPush(const Object* obj) {
1180 if (static_cast<size_t>(length_) < max_size) {
1181 Push(const_cast<Object*>(obj));
1182 } else {
Ian Rogers1d54e732013-05-02 21:10:01 -07001183 // Internal (thread-local) buffer is full, push to a new buffer instead.
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001184 if (UNLIKELY(output_ == NULL)) {
1185 AllocateOutputChunk();
1186 } else if (UNLIKELY(static_cast<size_t>(output_->length_) == max_size)) {
1187 // Output block is full, queue it up for processing and obtain a new block.
1188 EnqueueOutput();
1189 AllocateOutputChunk();
1190 }
1191 output_->Push(const_cast<Object*>(obj));
1192 }
1193 }
1194
1195 void ScanObject(Object* obj) {
1196 mark_sweep_->ScanObjectVisit(obj, MarkObjectParallelVisitor(this));
1197 }
1198
1199 void EnqueueOutput() {
1200 if (output_ != NULL) {
1201 uint64_t start = 0;
1202 if (kMeasureOverhead) {
1203 start = NanoTime();
1204 }
1205 thread_pool_->AddTask(Thread::Current(), output_);
1206 output_ = NULL;
1207 if (kMeasureOverhead) {
1208 mark_sweep_->overhead_time_ += NanoTime() - start;
1209 }
1210 }
1211 }
1212
1213 void AllocateOutputChunk() {
1214 uint64_t start = 0;
1215 if (kMeasureOverhead) {
1216 start = NanoTime();
1217 }
1218 output_ = new MarkStackChunk(thread_pool_, mark_sweep_, NULL, NULL);
1219 if (kMeasureOverhead) {
1220 mark_sweep_->overhead_time_ += NanoTime() - start;
1221 }
1222 }
1223
1224 void Finalize() {
1225 EnqueueOutput();
1226 delete this;
1227 }
1228
1229  // Scans all of the objects in the chunk.
1230 virtual void Run(Thread* self) {
Brian Carlstromd74e41b2013-03-24 23:47:01 -07001231 size_t index;
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001232 while ((index = index_++) < length_) {
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001233 if (kUseMarkStackPrefetch) {
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001234 static const size_t prefetch_look_ahead = 1;
1235 __builtin_prefetch(data_[std::min(index + prefetch_look_ahead, length_ - 1)]);
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001236 }
1237 Object* obj = data_[index];
1238 DCHECK(obj != NULL);
1239 ScanObject(obj);
1240 }
1241 }
1242};
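// Spill-and-spawn sketch of how MarkStackPush above distributes work: each
// chunk first fills its own data_, then spills into output_, and a full
// output_ becomes a new task. BoundedPushSketch is a hypothetical stand-in
// for the bounded-buffer part of that logic.
static bool BoundedPushSketch(const Object** buf, size_t* length, size_t capacity,
                              const Object* obj) {
  if (*length == capacity) {
    return false;  // Caller must enqueue the full buffer and start a new one.
  }
  buf[(*length)++] = obj;  // Single-writer buffer, so no atomics are needed.
  return true;
}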
1243
1244void MarkSweep::ProcessMarkStackParallel() {
1245 CHECK(kDisableFinger) << "parallel mark stack processing cannot work when finger is enabled";
1246 Thread* self = Thread::Current();
1247 ThreadPool* thread_pool = GetHeap()->GetThreadPool();
1248 // Split the current mark stack up into work tasks.
1249 const size_t num_threads = thread_pool->GetThreadCount();
1250 const size_t stack_size = mark_stack_->Size();
1251 const size_t chunk_size =
1252 std::min((stack_size + num_threads - 1) / num_threads,
1253 static_cast<size_t>(MarkStackChunk::max_size));
1254 size_t index = 0;
1255 for (size_t i = 0; i < num_threads || index < stack_size; ++i) {
1256 Object** begin = &mark_stack_->Begin()[std::min(stack_size, index)];
1257 Object** end = &mark_stack_->Begin()[std::min(stack_size, index + chunk_size)];
1258 index += chunk_size;
1259 thread_pool->AddTask(self, new MarkStackChunk(thread_pool, this, begin, end));
1260 }
1261 thread_pool->StartWorkers(self);
Ian Rogers1d54e732013-05-02 21:10:01 -07001262 thread_pool->Wait(self, true, true);
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001263 mark_stack_->Reset();
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001264  // LOG(INFO) << "Idle wait time " << PrettyDuration(thread_pool->GetWaitTime());
1265 CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
Carl Shapiro69759ea2011-07-21 18:13:35 -07001266}
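// Worked example of the chunk split above, under illustrative values: with
// stack_size = 1000 and num_threads = 3, the ceiling division
// (1000 + 3 - 1) / 3 = 334 yields chunks covering [0,334), [334,668) and
// [668,1000); the std::min clamps keep the final chunk's begin/end inside
// the stack. ChunkCountSketch is a hypothetical helper showing the count.
static size_t ChunkCountSketch(size_t stack_size, size_t chunk_size) {
  return (stack_size + chunk_size - 1) / chunk_size;  // Ceiling division.
}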
1267
Ian Rogers5d76c432011-10-31 21:42:49 -07001268// Scan anything that's on the mark stack.
Carl Shapiro69759ea2011-07-21 18:13:35 -07001269void MarkSweep::ProcessMarkStack() {
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001270 ThreadPool* thread_pool = GetHeap()->GetThreadPool();
1271 if (kParallelMarkStack && thread_pool != NULL && thread_pool->GetThreadCount() > 0) {
1272 ProcessMarkStackParallel();
1273 return;
1274 }
1275
Mathieu Chartierd8195f12012-10-05 12:21:28 -07001276 if (kUseMarkStackPrefetch) {
1277 const size_t fifo_size = 4;
1278 const size_t fifo_mask = fifo_size - 1;
1279 const Object* fifo[fifo_size];
1280      for (size_t i = 0; i < fifo_size; ++i) {
1281 fifo[i] = NULL;
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001282 }
Mathieu Chartierd8195f12012-10-05 12:21:28 -07001283 size_t fifo_pos = 0;
1284 size_t fifo_count = 0;
1285 for (;;) {
1286 const Object* obj = fifo[fifo_pos & fifo_mask];
1287 if (obj != NULL) {
1288 ScanObject(obj);
1289 fifo[fifo_pos & fifo_mask] = NULL;
1290 --fifo_count;
1291 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001292
Mathieu Chartierd8195f12012-10-05 12:21:28 -07001293 if (!mark_stack_->IsEmpty()) {
1294 const Object* obj = mark_stack_->PopBack();
1295 DCHECK(obj != NULL);
1296 fifo[fifo_pos & fifo_mask] = obj;
1297 __builtin_prefetch(obj);
1298 fifo_count++;
1299 }
1300 fifo_pos++;
1301
1302 if (!fifo_count) {
1303 CHECK(mark_stack_->IsEmpty()) << mark_stack_->Size();
1304 break;
1305 }
1306 }
1307 } else {
1308 while (!mark_stack_->IsEmpty()) {
1309 const Object* obj = mark_stack_->PopBack();
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001310 DCHECK(obj != NULL);
Mathieu Chartierd8195f12012-10-05 12:21:28 -07001311 ScanObject(obj);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001312 }
1313 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07001314}
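// The prefetch FIFO above is a power-of-two ring buffer: with fifo_size = 4,
// 'fifo_pos & fifo_mask' equals 'fifo_pos % fifo_size' but avoids a divide.
// A standalone sketch of that indexing (RingSlotSketch is hypothetical):
static const Object* RingSlotSketch(const Object* const* slots, size_t size, size_t pos) {
  // Only valid when size is a power of two, so that (size - 1) is an
  // all-ones mask and pos & (size - 1) == pos % size.
  return slots[pos & (size - 1)];
}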
1315
Carl Shapiro69759ea2011-07-21 18:13:35 -07001316// Walks the reference list marking any references subject to the
1317// reference clearing policy. References with a black referent are
1318// removed from the list. References with white referents biased
1319// toward saving are blackened and also removed from the list.
1320void MarkSweep::PreserveSomeSoftReferences(Object** list) {
1321 DCHECK(list != NULL);
1322 Object* clear = NULL;
1323 size_t counter = 0;
Mathieu Chartierb43b7d42012-06-19 13:15:09 -07001324
1325 DCHECK(mark_stack_->IsEmpty());
1326
Carl Shapiro69759ea2011-07-21 18:13:35 -07001327 while (*list != NULL) {
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001328 Object* ref = heap_->DequeuePendingReference(list);
1329 Object* referent = heap_->GetReferenceReferent(ref);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001330 if (referent == NULL) {
1331 // Referent was cleared by the user during marking.
1332 continue;
1333 }
1334 bool is_marked = IsMarked(referent);
1335 if (!is_marked && ((++counter) & 1)) {
1336 // Referent is white and biased toward saving, mark it.
1337 MarkObject(referent);
1338 is_marked = true;
1339 }
1340 if (!is_marked) {
1341 // Referent is white, queue it for clearing.
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001342 heap_->EnqueuePendingReference(ref, &clear);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001343 }
1344 }
1345 *list = clear;
1346 // Restart the mark with the newly black references added to the
1347 // root set.
1348 ProcessMarkStack();
1349}
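// The '(++counter) & 1' policy above preserves every other white soft
// referent: odd counts are marked and survive, even counts stay white and
// are queued for clearing, so roughly half of the soft references survive a
// non-aggressive GC. A hypothetical sketch of the decision:
static bool ShouldPreserveSoftSketch(size_t* counter) {
  return (++(*counter) & 1) != 0;  // true for the 1st, 3rd, 5th, ... referent seen.
}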
1350
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001351inline bool MarkSweep::IsMarked(const Object* object) const
1352 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
1353 if (object >= immune_begin_ && object < immune_end_) {
1354 return true;
1355 }
1356 DCHECK(current_mark_bitmap_ != NULL);
1357 if (current_mark_bitmap_->HasAddress(object)) {
1358 return current_mark_bitmap_->Test(object);
1359 }
1360 return heap_->GetMarkBitmap()->Test(object);
1361}
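// Shape of the bitmap half of the test above, assuming one mark bit per
// 8-byte granule over a space's address range (BitmapSketch and the 8-byte
// granule are illustrative of a typical space bitmap, not the exact
// accounting::SpaceBitmap layout).
struct BitmapSketch {
  uintptr_t begin;
  uintptr_t end;
  const uint32_t* words;
  bool HasAddress(uintptr_t addr) const {
    return addr >= begin && addr < end;
  }
  bool Test(uintptr_t addr) const {
    uintptr_t bit = (addr - begin) / 8;  // One mark bit per 8-byte granule.
    return ((words[bit / 32] >> (bit % 32)) & 1) != 0;
  }
};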
1362
Carl Shapiro69759ea2011-07-21 18:13:35 -07001364// Unlink the reference list, clearing reference objects with white
1365// referents. Cleared references registered to a reference queue are
1366// scheduled for appending by the heap worker thread.
1367void MarkSweep::ClearWhiteReferences(Object** list) {
1368 DCHECK(list != NULL);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001369 while (*list != NULL) {
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001370 Object* ref = heap_->DequeuePendingReference(list);
1371 Object* referent = heap_->GetReferenceReferent(ref);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001372 if (referent != NULL && !IsMarked(referent)) {
1373 // Referent is white, clear it.
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001374 heap_->ClearReferenceReferent(ref);
1375 if (heap_->IsEnqueuable(ref)) {
1376 heap_->EnqueueReference(ref, &cleared_reference_list_);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001377 }
1378 }
1379 }
1380 DCHECK(*list == NULL);
1381}
1382
1383// Enqueues finalizer references with white referents. White
1384// referents are blackened, moved to the zombie field, and the
1385// referent field is cleared.
1386void MarkSweep::EnqueueFinalizerReferences(Object** list) {
1387 DCHECK(list != NULL);
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001388 MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset();
Carl Shapiro69759ea2011-07-21 18:13:35 -07001389 bool has_enqueued = false;
1390 while (*list != NULL) {
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001391 Object* ref = heap_->DequeuePendingReference(list);
1392 Object* referent = heap_->GetReferenceReferent(ref);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001393 if (referent != NULL && !IsMarked(referent)) {
1394 MarkObject(referent);
1395      // If the referent is non-null, the reference must be enqueuable.
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001396 DCHECK(heap_->IsEnqueuable(ref));
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001397 ref->SetFieldObject(zombie_offset, referent, false);
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001398 heap_->ClearReferenceReferent(ref);
1399 heap_->EnqueueReference(ref, &cleared_reference_list_);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001400 has_enqueued = true;
1401 }
1402 }
1403 if (has_enqueued) {
1404 ProcessMarkStack();
1405 }
1406 DCHECK(*list == NULL);
1407}
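// Zombie-handoff sketch for the finalizer path above (FinalizerRefSketch is
// a hypothetical mirror of the Java-side fields): the referent is parked in
// 'zombie' so the finalizer thread can still reach the object, while
// Reference.get() starts returning null because 'referent' is cleared.
struct FinalizerRefSketch {
  const Object* referent;
  const Object* zombie;
};
static void ZombifySketch(FinalizerRefSketch* ref) {
  ref->zombie = ref->referent;  // Keeps the object reachable until finalized.
  ref->referent = NULL;         // The reference now reports a cleared referent.
}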
1408
Carl Shapiro58551df2011-07-24 03:09:51 -07001409// Process reference class instances and schedule finalizations.
Carl Shapiro69759ea2011-07-21 18:13:35 -07001410void MarkSweep::ProcessReferences(Object** soft_references, bool clear_soft,
1411 Object** weak_references,
1412 Object** finalizer_references,
1413 Object** phantom_references) {
1414 DCHECK(soft_references != NULL);
1415 DCHECK(weak_references != NULL);
1416 DCHECK(finalizer_references != NULL);
1417 DCHECK(phantom_references != NULL);
1418
1419 // Unless we are in the zygote or required to clear soft references
1420  // with white referents, preserve some white referents.
Ian Rogers2945e242012-06-03 14:45:16 -07001421 if (!clear_soft && !Runtime::Current()->IsZygote()) {
Carl Shapiro69759ea2011-07-21 18:13:35 -07001422 PreserveSomeSoftReferences(soft_references);
1423 }
1424
1425 // Clear all remaining soft and weak references with white
1426 // referents.
1427 ClearWhiteReferences(soft_references);
1428 ClearWhiteReferences(weak_references);
1429
1430 // Preserve all white objects with finalize methods and schedule
1431 // them for finalization.
1432 EnqueueFinalizerReferences(finalizer_references);
1433
1434 // Clear all f-reachable soft and weak references with white
1435 // referents.
1436 ClearWhiteReferences(soft_references);
1437 ClearWhiteReferences(weak_references);
1438
1439 // Clear all phantom references with white referents.
1440 ClearWhiteReferences(phantom_references);
1441
1442 // At this point all reference lists should be empty.
1443 DCHECK(*soft_references == NULL);
1444 DCHECK(*weak_references == NULL);
1445 DCHECK(*finalizer_references == NULL);
1446 DCHECK(*phantom_references == NULL);
1447}
1448
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001449void MarkSweep::UnBindBitmaps() {
Ian Rogers1d54e732013-05-02 21:10:01 -07001450 const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
1451 // TODO: C++0x
1452 typedef std::vector<space::ContinuousSpace*>::const_iterator It;
1453 for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
1454 space::ContinuousSpace* space = *it;
1455 if (space->IsDlMallocSpace()) {
1456 space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001457 if (alloc_space->temp_bitmap_.get() != NULL) {
1458 // At this point, the temp_bitmap holds our old mark bitmap.
Ian Rogers1d54e732013-05-02 21:10:01 -07001459 accounting::SpaceBitmap* new_bitmap = alloc_space->temp_bitmap_.release();
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001460 GetHeap()->GetMarkBitmap()->ReplaceBitmap(alloc_space->mark_bitmap_.get(), new_bitmap);
1461 CHECK_EQ(alloc_space->mark_bitmap_.release(), alloc_space->live_bitmap_.get());
1462 alloc_space->mark_bitmap_.reset(new_bitmap);
1463 DCHECK(alloc_space->temp_bitmap_.get() == NULL);
1464 }
1465 }
1466 }
1467}
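// Pointer-shuffle sketch of the unbind above, assuming that binding earlier
// aliased the mark bitmap to the live bitmap and parked the real mark bitmap
// in temp (all names are illustrative). Unbinding restores the saved bitmap
// and leaves live and mark as distinct bitmaps again.
static void UnBindSketch(void** live, void** mark, void** temp) {
  if (*temp != NULL) {
    // *mark currently aliases *live; hand the saved bitmap back to mark.
    *mark = *temp;
    *temp = NULL;
  }
  // Postcondition: *live is unchanged, and *mark != *live when a temp existed.
}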
1468
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001469void MarkSweep::FinishPhase() {
1470 // Can't enqueue referneces if we hold the mutator lock.
1471 Object* cleared_references = GetClearedReferences();
Ian Rogers1d54e732013-05-02 21:10:01 -07001472 Heap* heap = GetHeap();
1473 heap->EnqueueClearedReferences(&cleared_references);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001474
Ian Rogers1d54e732013-05-02 21:10:01 -07001475 heap->PostGcVerification(this);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001476
Ian Rogers1d54e732013-05-02 21:10:01 -07001477 timings_.NewSplit("GrowForUtilization");
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07001478 heap->GrowForUtilization(GetGcType(), GetDurationNs());
Mathieu Chartier65db8802012-11-20 12:36:46 -08001479
Ian Rogers1d54e732013-05-02 21:10:01 -07001480 timings_.NewSplit("RequestHeapTrim");
1481 heap->RequestHeapTrim();
Mathieu Chartier65db8802012-11-20 12:36:46 -08001482
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001483 // Update the cumulative statistics
Ian Rogers1d54e732013-05-02 21:10:01 -07001484 total_time_ns_ += GetDurationNs();
1485  // Use a uint64_t initial value so std::accumulate's accumulator is not a truncating int.
1486  total_paused_time_ns_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(),
                                               static_cast<uint64_t>(0), std::plus<uint64_t>());
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001487 total_freed_objects_ += GetFreedObjects();
1488 total_freed_bytes_ += GetFreedBytes();
1489
1490 // Ensure that the mark stack is empty.
1491 CHECK(mark_stack_->IsEmpty());
1492
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001493 if (kCountScannedTypes) {
1494 VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
1495 << " other=" << other_count_;
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001496 }
1497
1498 if (kCountTasks) {
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001499 VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001500 }
1501
1502 if (kMeasureOverhead) {
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001503 VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001504 }
1505
1506 if (kProfileLargeObjects) {
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001507 VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001508 }
1509
1510 if (kCountClassesMarked) {
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001511 VLOG(gc) << "Classes marked " << classes_marked_;
1512 }
1513
1514 if (kCountJavaLangRefs) {
1515 VLOG(gc) << "References scanned " << reference_count_;
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001516 }
1517
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001518 // Update the cumulative loggers.
1519 cumulative_timings_.Start();
Ian Rogers1d54e732013-05-02 21:10:01 -07001520 cumulative_timings_.AddNewLogger(timings_);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001521 cumulative_timings_.End();
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001522
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001523 // Clear all of the spaces' mark bitmaps.
Ian Rogers1d54e732013-05-02 21:10:01 -07001524 const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
1525 // TODO: C++0x
1526 typedef std::vector<space::ContinuousSpace*>::const_iterator It;
1527 for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
1528 space::ContinuousSpace* space = *it;
1529 if (space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001530 space->GetMarkBitmap()->Clear();
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001531 }
1532 }
Mathieu Chartier5301cd22012-05-31 12:11:36 -07001533 mark_stack_->Reset();
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001534
1535 // Reset the marked large objects.
Ian Rogers1d54e732013-05-02 21:10:01 -07001536 space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001537 large_objects->GetMarkObjects()->Clear();
Carl Shapiro69759ea2011-07-21 18:13:35 -07001538}
1539
Ian Rogers1d54e732013-05-02 21:10:01 -07001540} // namespace collector
1541} // namespace gc
Carl Shapiro69759ea2011-07-21 18:13:35 -07001542} // namespace art