/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "monitor.h"
#include "mark_sweep-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/field.h"
#include "mirror/field-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::Class;
using ::art::mirror::Field;
using ::art::mirror::Object;
using ::art::mirror::ObjectArray;

namespace art {
namespace gc {
namespace collector {

// Performance options.
static const bool kParallelMarkStack = true;
static const bool kDisableFinger = true;  // TODO: Fix, bit rotten.
static const bool kUseMarkStackPrefetch = true;

// Profiling and information flags.
static const bool kCountClassesMarked = false;
static const bool kProfileLargeObjects = false;
static const bool kMeasureOverhead = false;
static const bool kCountTasks = false;
static const bool kCountJavaLangRefs = false;

void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    BindLiveToMarkBitmap(space);
  }

  // Add the space to the immune region.
  if (immune_begin_ == NULL) {
    DCHECK(immune_end_ == NULL);
    SetImmuneRange(reinterpret_cast<Object*>(space->Begin()),
                   reinterpret_cast<Object*>(space->End()));
  } else {
    const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
    const space::ContinuousSpace* prev_space = NULL;
    // Find out if the previous space is immune.
    // TODO: C++0x
    typedef std::vector<space::ContinuousSpace*>::const_iterator It;
    for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
      if (*it == space) {
        break;
      }
      prev_space = *it;
    }

    // If the previous space was immune, then extend the immune region. Relies on continuous spaces
    // being sorted by Heap::AddContinuousSpace.
    if (prev_space != NULL &&
        immune_begin_ <= reinterpret_cast<Object*>(prev_space->Begin()) &&
        immune_end_ >= reinterpret_cast<Object*>(prev_space->End())) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
    }
  }
}
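
// Illustrative note (not from the original source): because Heap::AddContinuousSpace keeps
// continuous spaces sorted by address, the immune region can always remain one
// [immune_begin_, immune_end_) interval. A hypothetical layout shows the extension step:
//
//   image space  [0x1000, 0x2000)  <- already immune
//   zygote space [0x2000, 0x3000)  <- ImmuneSpace(zygote) called next
//
//   immune_begin_ = min(0x2000, 0x1000) = 0x1000
//   immune_end_   = max(0x3000, 0x2000) = 0x3000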

void MarkSweep::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);

  // Mark all of the spaces we never collect as immune.
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      ImmuneSpace(space);
    }
  }
  timings_.EndSplit();
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       (is_concurrent ? "concurrent mark sweep" : "mark sweep")),
      current_mark_bitmap_(NULL),
      java_lang_Class_(NULL),
      mark_stack_(NULL),
      immune_begin_(NULL),
      immune_end_(NULL),
      soft_reference_list_(NULL),
      weak_reference_list_(NULL),
      finalizer_reference_list_(NULL),
      phantom_reference_list_(NULL),
      cleared_reference_list_(NULL),
      gc_barrier_(new Barrier(0)),
      large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
      mark_stack_expand_lock_("mark sweep mark stack expand lock"),
      is_concurrent_(is_concurrent),
      clear_soft_references_(false) {
}
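
// Construction sketch (illustrative; the actual call sites are an assumption, not
// code from this file): the heap typically creates one collector per configuration,
// e.g.
//
//   collectors.push_back(new MarkSweep(heap, /*is_concurrent=*/false));
//   collectors.push_back(new MarkSweep(heap, /*is_concurrent=*/true));
//
// The name_prefix argument lets subclasses distinguish their timing logs from the
// plain "mark sweep" / "concurrent mark sweep" names built above.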

void MarkSweep::InitializePhase() {
  timings_.Reset();
  base::TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = GetHeap()->mark_stack_.get();
  DCHECK(mark_stack_ != NULL);
  SetImmuneRange(NULL, NULL);
  soft_reference_list_ = NULL;
  weak_reference_list_ = NULL;
  finalizer_reference_list_ = NULL;
  phantom_reference_list_ = NULL;
  cleared_reference_list_ = NULL;
  freed_bytes_ = 0;
  freed_objects_ = 0;
  class_count_ = 0;
  array_count_ = 0;
  other_count_ = 0;
  large_object_test_ = 0;
  large_object_mark_ = 0;
  classes_marked_ = 0;
  overhead_time_ = 0;
  work_chunks_created_ = 0;
  work_chunks_deleted_ = 0;
  reference_count_ = 0;
  java_lang_Class_ = Class::GetJavaLangClass();
  CHECK(java_lang_Class_ != NULL);

  FindDefaultMarkBitmap();

  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
}

void MarkSweep::ProcessReferences(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_,
                    &finalizer_reference_list_, &phantom_reference_list_);
}

bool MarkSweep::HandleDirtyObjectsPhase() {
  base::TimingLogger::ScopedSplit split("HandleDirtyObjectsPhase", &timings_);
  Thread* self = Thread::Current();
  accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
  Locks::mutator_lock_->AssertExclusiveHeld(self);

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Re-mark root set.
    ReMarkRoots();

    // Scan dirty objects; this is only required if we are not doing concurrent GC.
    RecursiveMarkDirtyObjects(accounting::CardTable::kCardDirty);
  }

  ProcessReferences(self);

  // Only need to do this if we have the card mark verification on, and only during concurrent GC.
  if (GetHeap()->verify_missing_card_marks_) {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // This second sweep makes sure that we don't have any objects in the live stack which point to
    // freed objects. These cause problems since their references may be previously freed objects.
    SweepArray(allocation_stack, false);
  }
  return true;
}

bool MarkSweep::IsConcurrent() const {
  return is_concurrent_;
}

void MarkSweep::MarkingPhase() {
  base::TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  Heap* heap = GetHeap();
  Thread* self = Thread::Current();

  BindBitmaps();
  FindDefaultMarkBitmap();

  // Process dirty cards and add dirty cards to mod union tables.
  heap->ProcessCards(timings_);

  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  heap->SwapStacks();

  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    MarkRoots();
  } else {
    MarkRootsCheckpoint(self);
    MarkNonThreadRoots();
  }
  MarkConcurrentRoots();

  heap->UpdateAndMarkModUnion(this, timings_, GetGcType());
  MarkReachableObjects();
}

void MarkSweep::MarkReachableObjects() {
  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
  // knowing that new allocations won't be marked as live.
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStack(heap_->alloc_space_->GetLiveBitmap(),
                        heap_->large_object_space_->GetLiveObjects(),
                        live_stack);
  live_stack->Reset();
  timings_.EndSplit();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  base::TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  Thread* self = Thread::Current();

  if (!IsConcurrent()) {
    base::TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
    ProcessReferences(self);
  } else {
    base::TimingLogger::ScopedSplit split("UnMarkAllocStack", &timings_);
    accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // The allocation stack contains things allocated since the start of the GC. These may have
    // been marked during this GC, meaning they won't be eligible for reclaiming in the next
    // sticky GC. Remove these objects from the mark bitmaps so that they will be eligible for
    // sticky collection.
    // There is a race here which is safely handled. Another thread such as the hprof could
    // have flushed the alloc stack after we resumed the threads. This is safe however, since
    // resetting the allocation stack zeros it out with madvise. This means that we will either
    // read NULLs or attempt to unmark a newly allocated object which will not be marked in the
    // first place.
    mirror::Object** end = allocation_stack->End();
    for (mirror::Object** it = allocation_stack->Begin(); it != end; ++it) {
      Object* obj = *it;
      if (obj != NULL) {
        UnMarkObjectNonNull(obj);
      }
    }
  }

  // Before freeing anything, let's verify the heap.
  if (kIsDebugBuild) {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    VerifyImageRoots();
  }
  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Reclaim unmarked objects.
    Sweep(false);

    // Swap the live and mark bitmaps for each space which we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();

    // Unbind the live and mark bitmaps.
    UnBindBitmaps();
  }
}
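
// Phase-ordering sketch (an assumption inferred from the phase methods above, not
// the actual driver code, which lives in the GarbageCollector base class):
//
//   InitializePhase();
//   MarkingPhase();                          // concurrent: mutators mostly run
//   while (!HandleDirtyObjectsPhase()) {
//   }                                        // runs with mutators suspended
//   ReclaimPhase();
//
// For a non-concurrent collection the whole sequence runs under suspension and
// ProcessReferences happens in ReclaimPhase instead of HandleDirtyObjectsPhase.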

void MarkSweep::SetImmuneRange(Object* begin, Object* end) {
  immune_begin_ = begin;
  immune_end_ = end;
}

void MarkSweep::FindDefaultMarkBitmap() {
  base::TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_mark_bitmap_ = (*it)->GetMarkBitmap();
      CHECK(current_mark_bitmap_ != NULL);
      return;
    }
  }
  GetHeap()->DumpSpaces();
  LOG(FATAL) << "Could not find a default mark bitmap";
}

void MarkSweep::ExpandMarkStack() {
  // Rare case, no need to have Thread::Current be a parameter.
  MutexLock mu(Thread::Current(), mark_stack_expand_lock_);
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp;
  temp.insert(temp.begin(), mark_stack_->Begin(), mark_stack_->End());
  mark_stack_->Resize(mark_stack_->Capacity() * 2);
  for (size_t i = 0; i < temp.size(); ++i) {
    mark_stack_->PushBack(temp[i]);
  }
}
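
// The capacity re-check under mark_stack_expand_lock_ above is double-checked
// expansion: every marker that finds the stack full races into this function, but
// only the lock winner resizes; losers observe the doubled capacity and return.
// Generic shape of the idiom (a sketch, names hypothetical):
//
//   if (UNLIKELY(stack_full())) {       // unsynchronized check at the call site
//     MutexLock mu(Thread::Current(), expand_lock_);
//     if (!stack_full()) return;        // another thread already grew the stack
//     grow();                           // copy out, double capacity, push back
//   }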

inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj) {
  DCHECK(obj != NULL);
  if (MarkObjectParallel(obj)) {
    while (UNLIKELY(!mark_stack_->AtomicPushBack(const_cast<Object*>(obj)))) {
      // The only reason a push can fail is that the mark stack is full.
      ExpandMarkStack();
    }
  }
}

inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) {
  DCHECK(!IsImmune(obj));
  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, false);
      return;
    }
  }

  DCHECK(object_bitmap->HasAddress(obj));
  object_bitmap->Clear(obj);
}

inline void MarkSweep::MarkObjectNonNull(const Object* obj) {
  DCHECK(obj != NULL);

  if (IsImmune(obj)) {
    DCHECK(IsMarked(obj));
    return;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, true);
      return;
    }
  }

  // This object was not previously marked.
  if (!object_bitmap->Test(obj)) {
    object_bitmap->Set(obj);
    // Do we need to expand the mark stack?
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(const_cast<Object*>(obj));
  }
}
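
// Invariant worth noting (descriptive, not new behavior): an object is pushed on
// the mark stack at most once, because the bit is tested and set before the push:
//
//   if (!bitmap->Test(obj)) {  // not yet marked
//     bitmap->Set(obj);        // mark first...
//     stack->PushBack(obj);    // ...then queue exactly once for scanning
//   }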

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool MarkSweep::MarkLargeObject(const Object* obj, bool set) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
  if (kProfileLargeObjects) {
    ++large_object_test_;
  }
  if (UNLIKELY(!large_objects->Test(obj))) {
    if (!large_object_space->Contains(obj)) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      VerifyRoots();
      LOG(FATAL) << "Can't mark bad root";
    }
    if (kProfileLargeObjects) {
      ++large_object_mark_;
    }
    if (set) {
      large_objects->Set(obj);
    } else {
      large_objects->Clear(obj);
    }
    return true;
  }
  return false;
}

inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != NULL);

  if (IsImmune(obj)) {
    DCHECK(IsMarked(obj));
    return false;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (new_bitmap != NULL) {
      object_bitmap = new_bitmap;
    } else {
      // TODO: Remove the Thread::Current here?
      // TODO: Convert this to some kind of atomic marking?
      MutexLock mu(Thread::Current(), large_object_lock_);
      return MarkLargeObject(obj, true);
    }
  }

  // Return true if the object was not previously marked.
  return !object_bitmap->AtomicTestAndSet(obj);
}
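
// AtomicTestAndSet above is what makes parallel marking safe: exactly one thread
// observes the 0 -> 1 transition of an object's mark bit and therefore wins the
// right to push it. A sketch of the usual implementation on one bitmap word
// (the real code lives in accounting::SpaceBitmap; details here are assumed):
//
//   word old_word;
//   do {
//     old_word = *word_address;
//     if ((old_word & mask) != 0) {
//       return true;                     // already marked by another thread
//     }
//   } while (!CompareAndSwap(word_address, old_word, old_word | mask));
//   return false;                        // this thread set the bit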

// Used to mark objects when recursing. Recursion is done by moving
// the finger across the bitmaps in address order and marking child
// objects. Any newly-marked objects whose addresses are lower than
// the finger won't be visited by the bitmap scan, so those objects
// need to be added to the mark stack.
void MarkSweep::MarkObject(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj);
  }
}

void MarkSweep::MarkRoot(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj);
  }
}

void MarkSweep::MarkRootParallelCallback(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNullParallel(root);
}

void MarkSweep::MarkObjectCallback(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNull(root);
}

void MarkSweep::ReMarkObjectVisitor(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNull(root);
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) {
  // See if the root is on any space bitmap.
  if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == NULL) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root;
      if (visitor != NULL) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

// Marks all objects in the root set.
void MarkSweep::MarkRoots() {
  timings_.StartSplit("MarkRoots");
  Runtime::Current()->VisitNonConcurrentRoots(MarkObjectCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkNonThreadRoots() {
  timings_.StartSplit("MarkNonThreadRoots");
  Runtime::Current()->VisitNonThreadRoots(MarkObjectCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkConcurrentRoots() {
  timings_.StartSplit("MarkConcurrentRoots");
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(MarkObjectCallback, this, false, true);
  timings_.EndSplit();
}

class CheckObjectVisitor {
 public:
  explicit CheckObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  void operator()(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
    }
    mark_sweep_->CheckReference(obj, ref, offset, is_static);
  }

 private:
  MarkSweep* const mark_sweep_;
};

void MarkSweep::CheckObject(const Object* obj) {
  DCHECK(obj != NULL);
  CheckObjectVisitor visitor(this);
  VisitObjectReferences(obj, visitor);
}

void MarkSweep::VerifyImageRootVisitor(Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  DCHECK(mark_sweep->heap_->GetMarkBitmap()->Test(root));
  mark_sweep->CheckObject(root);
}

void MarkSweep::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
  CHECK(space->IsDlMallocSpace());
  space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = alloc_space->mark_bitmap_.release();
  GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
  alloc_space->temp_bitmap_.reset(mark_bitmap);
  alloc_space->mark_bitmap_.reset(live_bitmap);
}
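
// After BindLiveToMarkBitmap the space's mark bitmap aliases its live bitmap, so
// every object live at the start of the GC is implicitly marked and cannot be
// swept; the original mark bitmap is parked in temp_bitmap_ until UnBindBitmaps()
// (defined elsewhere in this class) restores it.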

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  // TODO: Fix this when annotalysis works with visitors.
  void operator()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

void MarkSweep::ScanGrayObjects(byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  ScanObjectVisitor visitor(this);
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), space_end = spaces.end(); it != space_end; ++it) {
    space::ContinuousSpace* space = *it;
    switch (space->GetGcRetentionPolicy()) {
      case space::kGcRetentionPolicyNeverCollect:
        timings_.StartSplit("ScanGrayImageSpaceObjects");
        break;
      case space::kGcRetentionPolicyFullCollect:
        timings_.StartSplit("ScanGrayZygoteSpaceObjects");
        break;
      case space::kGcRetentionPolicyAlwaysCollect:
        timings_.StartSplit("ScanGrayAllocSpaceObjects");
        break;
    }
    byte* begin = space->Begin();
    byte* end = space->End();
    // Image spaces are handled properly since live == marked for them.
    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    card_table->Scan(mark_bitmap, begin, end, visitor, minimum_age);
    timings_.EndSplit();
  }
}
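
// Background for ScanGrayObjects (a sketch; the constants and encoding live in
// accounting::CardTable): the write barrier dirties one byte per fixed-size card
// of heap, so mapping an address to its card is a shift, roughly:
//
//   size_t card_index = (addr - heap_begin) >> kCardShift;  // names assumed
//   byte* card_begin  = heap_begin + (card_index << kCardShift);
//
// Scan() then visits marked objects on every card whose value is >= minimum_age.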

class CheckBitmapVisitor {
 public:
  explicit CheckBitmapVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  void operator()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
    }
    DCHECK(obj != NULL);
    mark_sweep_->CheckObject(obj);
  }

 private:
  MarkSweep* mark_sweep_;
};

void MarkSweep::VerifyImageRoots() {
  // Verify roots ensures that all the references inside the image space point to
  // objects which are either in the image space or marked objects in the alloc
  // space.
  timings_.StartSplit("VerifyImageRoots");
  CheckBitmapVisitor visitor(this);
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    if ((*it)->IsImageSpace()) {
      space::ImageSpace* space = (*it)->AsImageSpace();
      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      DCHECK(live_bitmap != NULL);
      live_bitmap->VisitMarkedRange(begin, end, visitor);
    }
  }
  timings_.EndSplit();
}

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  base::TimingLogger::ScopedSplit split("RecursiveMark", &timings_);
  // RecursiveMark will build the lists of known instances of the Reference classes.
  // See DelayReferenceReferent for details.
  CHECK(soft_reference_list_ == NULL);
  CHECK(weak_reference_list_ == NULL);
  CHECK(finalizer_reference_list_ == NULL);
  CHECK(phantom_reference_list_ == NULL);
  CHECK(cleared_reference_list_ == NULL);

  const bool partial = GetGcType() == kGcTypePartial;
  ScanObjectVisitor scan_visitor(this);
  if (!kDisableFinger) {
    const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
    // TODO: C++0x
    typedef std::vector<space::ContinuousSpace*>::const_iterator It;
    for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
      space::ContinuousSpace* space = *it;
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_mark_bitmap_ = space->GetMarkBitmap();
        if (current_mark_bitmap_ == NULL) {
          GetHeap()->DumpSpaces();
          LOG(FATAL) << "invalid bitmap";
        }
        // This function does not handle heap end increasing, so we must use the space end.
        uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
        uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
        current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
      }
    }
  }
  ProcessMarkStack();
}

bool MarkSweep::IsMarkedCallback(const Object* object, void* arg) {
  return
      reinterpret_cast<MarkSweep*>(arg)->IsMarked(object) ||
      !reinterpret_cast<MarkSweep*>(arg)->GetHeap()->GetLiveBitmap()->Test(object);
}

void MarkSweep::RecursiveMarkDirtyObjects(byte minimum_age) {
  ScanGrayObjects(minimum_age);
  ProcessMarkStack();
}

void MarkSweep::ReMarkRoots() {
  timings_.StartSplit("ReMarkRoots");
  Runtime::Current()->VisitRoots(ReMarkObjectVisitor, this, true, true);
  timings_.EndSplit();
}

void MarkSweep::SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg) {
  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
  MutexLock mu(Thread::Current(), vm->weak_globals_lock);
  IndirectReferenceTable* table = &vm->weak_globals;
  typedef IndirectReferenceTable::iterator It;  // TODO: C++0x auto
  for (It it = table->begin(), end = table->end(); it != end; ++it) {
    const Object** entry = *it;
    if (!is_marked(*entry, arg)) {
      *entry = kClearedJniWeakGlobal;
    }
  }
}

struct ArrayMarkedCheck {
  accounting::ObjectStack* live_stack;
  MarkSweep* mark_sweep;
};

// Either marked or not live.
bool MarkSweep::IsMarkedArrayCallback(const Object* object, void* arg) {
  ArrayMarkedCheck* array_check = reinterpret_cast<ArrayMarkedCheck*>(arg);
  if (array_check->mark_sweep->IsMarked(object)) {
    return true;
  }
  accounting::ObjectStack* live_stack = array_check->live_stack;
  return std::find(live_stack->Begin(), live_stack->End(), object) == live_stack->End();
}

void MarkSweep::SweepSystemWeaksArray(accounting::ObjectStack* allocations) {
  Runtime* runtime = Runtime::Current();
  // The callbacks check !is_marked, where is_marked is the callback, but we want
  // !IsMarked && IsLive, so compute !(!IsMarked && IsLive), which is equal to
  // (IsMarked || !IsLive). Or, for swapped bitmaps, (IsLive || !IsMarked).

  timings_.StartSplit("SweepSystemWeaksArray");
  ArrayMarkedCheck visitor;
  visitor.live_stack = allocations;
  visitor.mark_sweep = this;
  runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedArrayCallback, &visitor);
  runtime->GetMonitorList()->SweepMonitorList(IsMarkedArrayCallback, &visitor);
  SweepJniWeakGlobals(IsMarkedArrayCallback, &visitor);
  timings_.EndSplit();
}

void MarkSweep::SweepSystemWeaks() {
  Runtime* runtime = Runtime::Current();
  // The callbacks check !is_marked, where is_marked is the callback, but we want
  // !IsMarked && IsLive, so compute !(!IsMarked && IsLive), which is equal to
  // (IsMarked || !IsLive). Or, for swapped bitmaps, (IsLive || !IsMarked).
  timings_.StartSplit("SweepSystemWeaks");
  runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedCallback, this);
  runtime->GetMonitorList()->SweepMonitorList(IsMarkedCallback, this);
  SweepJniWeakGlobals(IsMarkedCallback, this);
  timings_.EndSplit();
}

bool MarkSweep::VerifyIsLiveCallback(const Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return "marked".
  return true;
}

void MarkSweep::VerifyIsLive(const Object* obj) {
  Heap* heap = GetHeap();
  if (!heap->GetLiveBitmap()->Test(obj)) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->GetLiveObjects()->Test(obj)) {
      if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
          heap->allocation_stack_->End()) {
        // Object not found!
        heap->DumpSpaces();
        LOG(FATAL) << "Found dead object " << obj;
      }
    }
  }
}

void MarkSweep::VerifySystemWeaks() {
  Runtime* runtime = Runtime::Current();
  // Verify system weaks, uses a special IsMarked callback which always returns true.
  runtime->GetInternTable()->SweepInternTableWeaks(VerifyIsLiveCallback, this);
  runtime->GetMonitorList()->SweepMonitorList(VerifyIsLiveCallback, this);

  JavaVMExt* vm = runtime->GetJavaVM();
  MutexLock mu(Thread::Current(), vm->weak_globals_lock);
  IndirectReferenceTable* table = &vm->weak_globals;
  typedef IndirectReferenceTable::iterator It;  // TODO: C++0x auto
  for (It it = table->begin(), end = table->end(); it != end; ++it) {
    const Object** entry = *it;
    VerifyIsLive(*entry);
  }
}

struct SweepCallbackContext {
  MarkSweep* mark_sweep;
  space::AllocSpace* space;
  Thread* self;
};

class CheckpointMarkThreadRoots : public Closure {
 public:
  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* mark_sweep_;
};

void MarkSweep::MarkRootsCheckpoint(Thread* self) {
  CheckpointMarkThreadRoots check_point(this);
  timings_.StartSplit("MarkRootsCheckpoint");
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the checkpoint is run on all threads, returning a count of the threads that
  // must run through the barrier including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // TODO: optimize to not release locks when there are no threads to wait for.
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun);
  CHECK_EQ(old_state, kWaitingPerformingGc);
  gc_barrier_->Increment(self, barrier_count);
  self->SetState(kWaitingPerformingGc);
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
  timings_.EndSplit();
}

void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  MarkSweep* mark_sweep = context->mark_sweep;
  Heap* heap = mark_sweep->GetHeap();
  space::AllocSpace* space = context->space;
  Thread* self = context->self;
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
  // Use a bulk free that merges consecutive objects before freeing, or free per object?
  // Documentation suggests better free performance with merging, but this may be at the expense
  // of allocation.
  size_t freed_objects = num_ptrs;
  // AllocSpace::FreeList clears the value in ptrs, so perform after clearing the live bit.
  size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs);
  heap->RecordFree(freed_objects, freed_bytes);
  mark_sweep->freed_objects_.fetch_add(freed_objects);
  mark_sweep->freed_bytes_.fetch_add(freed_bytes);
}

void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
  Heap* heap = context->mark_sweep->GetHeap();
  // We don't free any actual memory to avoid dirtying the shared zygote pages.
  for (size_t i = 0; i < num_ptrs; ++i) {
    Object* obj = static_cast<Object*>(ptrs[i]);
    heap->GetLiveBitmap()->Clear(obj);
    heap->GetCardTable()->MarkCard(obj);
  }
}

void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  size_t freed_bytes = 0;
  space::DlMallocSpace* space = heap_->GetAllocSpace();

  // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark
  // bitmap, resulting in occasional frees of Weaks which are still in use.
  SweepSystemWeaksArray(allocations);

  timings_.StartSplit("Process allocation stack");
  // Newly allocated objects MUST be in the alloc space and those are the only objects which we are
  // going to free.
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(live_bitmap, mark_bitmap);
    std::swap(large_live_objects, large_mark_objects);
  }

  size_t freed_large_objects = 0;
  size_t count = allocations->Size();
  Object** objects = const_cast<Object**>(allocations->Begin());
  Object** out = objects;

  // Empty the allocation stack.
  Thread* self = Thread::Current();
  for (size_t i = 0; i < count; ++i) {
    Object* obj = objects[i];
    // There should only be objects in the AllocSpace/LargeObjectSpace in the allocation stack.
    if (LIKELY(mark_bitmap->HasAddress(obj))) {
      if (!mark_bitmap->Test(obj)) {
        // Don't bother un-marking since we clear the mark bitmap anyways.
        *(out++) = obj;
      }
    } else if (!large_mark_objects->Test(obj)) {
      ++freed_large_objects;
      freed_bytes += large_object_space->Free(self, obj);
    }
  }
  CHECK_EQ(count, allocations->Size());
  timings_.EndSplit();

  timings_.StartSplit("FreeList");
  size_t freed_objects = out - objects;
  freed_bytes += space->FreeList(self, freed_objects, objects);
  VLOG(heap) << "Freed " << freed_objects << "/" << count
             << " objects with size " << PrettySize(freed_bytes);
  heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes);
  freed_objects_.fetch_add(freed_objects);
  freed_bytes_.fetch_add(freed_bytes);
  timings_.EndSplit();

  timings_.StartSplit("ResetStack");
  allocations->Reset();
  timings_.EndSplit();
}
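
// The allocation-stack walk above is the in-place filter idiom: 'out' trails the
// read index, compacting unmarked alloc-space objects to the front of the array so
// that a single bulk FreeList(self, out - objects, objects) call frees them all,
// while unmarked large objects are freed one by one since they have no bulk path.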
956
void MarkSweep::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  base::TimingLogger::ScopedSplit split("Sweep", &timings_);

  // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not the mark
  // bitmap, resulting in occasional frees of Weaks which are still in use.
  SweepSystemWeaks();

  const bool partial = (GetGcType() == kGcTypePartial);
  SweepCallbackContext scc;
  scc.mark_sweep = this;
  scc.self = Thread::Current();
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    // We always sweep spaces with the always-collect retention policy.
    bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect);
    if (!partial && !sweep_space) {
      // We also sweep full-collect spaces when the GC isn't partial (i.e. it is a full GC).
      sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
    }
    if (sweep_space) {
      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
      scc.space = space->AsDlMallocSpace();
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
      if (swap_bitmaps) {
        std::swap(live_bitmap, mark_bitmap);
      }
      if (!space->IsZygoteSpace()) {
        base::TimingLogger::ScopedSplit split("SweepAllocSpace", &timings_);
        // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                                           &SweepCallback, reinterpret_cast<void*>(&scc));
      } else {
        base::TimingLogger::ScopedSplit split("SweepZygote", &timings_);
        // Zygote sweep takes care of dirtying cards and clearing live bits; it does not free
        // actual memory.
        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                                           &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
      }
    }
  }

  SweepLargeObjects(swap_bitmaps);
}

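// Large objects live outside the continuous spaces and are tracked in live/mark object sets
// rather than address bitmaps, so they are swept by set difference: any object in the live set
// that fails the mark-set test is freed individually.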
void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  base::TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  // Sweep large objects.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  accounting::SpaceSetMap::Objects& live_objects = large_live_objects->GetObjects();
  // O(n*log(n)) but hopefully there are not too many large objects.
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  Thread* self = Thread::Current();
  // TODO: C++0x
  typedef accounting::SpaceSetMap::Objects::iterator It;
  for (It it = live_objects.begin(), end = live_objects.end(); it != end; ++it) {
    if (!large_mark_objects->Test(*it)) {
      freed_bytes += large_object_space->Free(self, const_cast<Object*>(*it));
      ++freed_objects;
    }
  }
  freed_objects_.fetch_add(freed_objects);
  freed_bytes_.fetch_add(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

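// CheckReference is a debugging aid: it logs a warning when a reference into an alloc space was
// reachable but never marked, reporting the holding field and whether the holder's card was
// dirty, presumably to help track down missed card marks and write-barrier bugs.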
void MarkSweep::CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) {
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsDlMallocSpace() && space->Contains(ref)) {
      DCHECK(IsMarked(obj));

      bool is_marked = IsMarked(ref);
      if (!is_marked) {
        LOG(INFO) << *space;
        LOG(WARNING) << (is_static ? "Static ref'" : "Instance ref'") << PrettyTypeOf(ref)
                     << "' (" << reinterpret_cast<const void*>(ref) << ") in '" << PrettyTypeOf(obj)
                     << "' (" << reinterpret_cast<const void*>(obj) << ") at offset "
                     << reinterpret_cast<void*>(offset.Int32Value()) << " wasn't marked";

        const Class* klass = is_static ? obj->AsClass() : obj->GetClass();
        DCHECK(klass != NULL);
        const ObjectArray<Field>* fields = is_static ? klass->GetSFields() : klass->GetIFields();
        DCHECK(fields != NULL);
        bool found = false;
        for (int32_t i = 0; i < fields->GetLength(); ++i) {
          const Field* cur = fields->Get(i);
          if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
            LOG(WARNING) << "Field referencing the alloc space was " << PrettyField(cur);
            found = true;
            break;
          }
        }
        if (!found) {
          LOG(WARNING) << "Could not find field in object alloc space with offset " << offset.Int32Value();
        }

        bool obj_marked = heap_->GetCardTable()->IsDirty(obj);
        if (!obj_marked) {
          LOG(WARNING) << "Object '" << PrettyTypeOf(obj) << "' "
                       << "(" << reinterpret_cast<const void*>(obj) << ") contains references to "
                       << "the alloc space, but wasn't card marked";
        }
      }
      // The reference can only be contained in one space, so stop once we've found it.
      break;
    }
  }
}

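// References awaiting processing are chained through their pendingNext field into one of four
// collector-owned lists (soft, weak, finalizer, phantom); a null pendingNext means the reference
// has not been enqueued yet, which is why DelayReferenceReferent checks it below.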
// Process the "referent" field in a java.lang.ref.Reference. If the
// referent has not yet been marked, put it on the appropriate list in
// the heap for later processing.
void MarkSweep::DelayReferenceReferent(Object* obj) {
  DCHECK(obj != NULL);
  Class* klass = obj->GetClass();
  DCHECK(klass != NULL);
  DCHECK(klass->IsReferenceClass());
  Object* pending = obj->GetFieldObject<Object*>(heap_->GetReferencePendingNextOffset(), false);
  Object* referent = heap_->GetReferenceReferent(obj);
  if (kCountJavaLangRefs) {
    ++reference_count_;
  }
  if (pending == NULL && referent != NULL && !IsMarked(referent)) {
    Object** list = NULL;
    if (klass->IsSoftReferenceClass()) {
      list = &soft_reference_list_;
    } else if (klass->IsWeakReferenceClass()) {
      list = &weak_reference_list_;
    } else if (klass->IsFinalizerReferenceClass()) {
      list = &finalizer_reference_list_;
    } else if (klass->IsPhantomReferenceClass()) {
      list = &phantom_reference_list_;
    }
    DCHECK(list != NULL) << PrettyClass(klass) << " " << std::hex << klass->GetAccessFlags();
    // TODO: One lock per list?
    heap_->EnqueuePendingReference(obj, list);
  }
}

void MarkSweep::ScanRoot(const Object* obj) {
  ScanObject(obj);
}

class MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  // TODO: Fix when annotalysis works with visitors.
  void operator()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */,
                  bool /* is_static */) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(ref);
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans an object reference. Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(const Object* obj) {
  MarkObjectVisitor visitor(this);
  ScanObjectVisit(obj, visitor);
}

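// MarkStackChunk is the unit of work for parallel marking: each task owns a fixed-size block of
// objects to scan, and newly marked references accumulate in an output_ chunk that is handed to
// the thread pool as a fresh task once full, amortizing task creation over up to max_size
// objects.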
class MarkStackChunk : public Task {
 public:
  MarkStackChunk(ThreadPool* thread_pool, MarkSweep* mark_sweep, Object** begin, Object** end)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        index_(0),
        length_(0),
        output_(NULL) {
    length_ = end - begin;
    if (begin != end) {
      // Cost not significant since we only do this for the initial set of mark stack chunks.
      memcpy(data_, begin, length_ * sizeof(*begin));
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  ~MarkStackChunk() {
    DCHECK(output_ == NULL || output_->length_ == 0);
    DCHECK_GE(index_, length_);
    delete output_;
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  static const size_t max_size = 1 * KB;
  // Index of which object we are scanning. Only needs to be atomic if we are doing work stealing.
  size_t index_;
  // Input / output mark stack. We add newly marked references to data_ until length reaches
  // max_size. This is an optimization so that fewer tasks are created.
  // TODO: Investigate using a bounded buffer FIFO.
  Object* data_[max_size];
  // How many elements in data_ we need to scan.
  size_t length_;
  // Output block; newly marked references get added to the output block so that another thread
  // can scan them.
  MarkStackChunk* output_;

  class MarkObjectParallelVisitor {
   public:
    explicit MarkObjectParallelVisitor(MarkStackChunk* chunk_task) : chunk_task_(chunk_task) {}

    void operator()(const Object* /* obj */, const Object* ref,
                    const MemberOffset& /* offset */, bool /* is_static */) const {
      if (ref != NULL && chunk_task_->mark_sweep_->MarkObjectParallel(ref)) {
        chunk_task_->MarkStackPush(ref);
      }
    }

   private:
    MarkStackChunk* const chunk_task_;
  };

  // Push an object into the block.
  // No need for an atomic increment since only one thread is writing to an output block at any
  // given time.
  void Push(Object* obj) {
    CHECK(obj != NULL);
    data_[length_++] = obj;
  }

  void MarkStackPush(const Object* obj) {
    if (static_cast<size_t>(length_) < max_size) {
      Push(const_cast<Object*>(obj));
    } else {
      // Internal (thread-local) buffer is full, push to a new buffer instead.
      if (UNLIKELY(output_ == NULL)) {
        AllocateOutputChunk();
      } else if (UNLIKELY(static_cast<size_t>(output_->length_) == max_size)) {
        // Output block is full, queue it up for processing and obtain a new block.
        EnqueueOutput();
        AllocateOutputChunk();
      }
      output_->Push(const_cast<Object*>(obj));
    }
  }

  void ScanObject(Object* obj) {
    mark_sweep_->ScanObjectVisit(obj, MarkObjectParallelVisitor(this));
  }

  void EnqueueOutput() {
    if (output_ != NULL) {
      uint64_t start = 0;
      if (kMeasureOverhead) {
        start = NanoTime();
      }
      thread_pool_->AddTask(Thread::Current(), output_);
      output_ = NULL;
      if (kMeasureOverhead) {
        mark_sweep_->overhead_time_.fetch_add(NanoTime() - start);
      }
    }
  }

  void AllocateOutputChunk() {
    uint64_t start = 0;
    if (kMeasureOverhead) {
      start = NanoTime();
    }
    output_ = new MarkStackChunk(thread_pool_, mark_sweep_, NULL, NULL);
    if (kMeasureOverhead) {
      mark_sweep_->overhead_time_.fetch_add(NanoTime() - start);
    }
  }

  void Finalize() {
    EnqueueOutput();
    delete this;
  }

  // Scans all of the objects in the chunk.
  virtual void Run(Thread* self) {
    size_t index;
    while ((index = index_++) < length_) {
      if (kUseMarkStackPrefetch) {
        static const size_t prefetch_look_ahead = 1;
        __builtin_prefetch(data_[std::min(index + prefetch_look_ahead, length_ - 1)]);
      }
      Object* obj = data_[index];
      DCHECK(obj != NULL);
      ScanObject(obj);
    }
  }
};

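// The mark stack is split into roughly equal chunks, capped at MarkStackChunk::max_size entries
// per task. For example (hypothetical numbers): with 4 worker threads and a 10000-entry stack,
// chunk_size = min((10000 + 3) / 4, 1024) = 1024, so the loop below queues 10 chunk tasks.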
void MarkSweep::ProcessMarkStackParallel() {
  CHECK(kDisableFinger) << "parallel mark stack processing cannot work when finger is enabled";
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  // Split the current mark stack up into work tasks.
  const size_t num_threads = thread_pool->GetThreadCount();
  const size_t stack_size = mark_stack_->Size();
  const size_t chunk_size =
      std::min((stack_size + num_threads - 1) / num_threads,
               static_cast<size_t>(MarkStackChunk::max_size));
  size_t index = 0;
  for (size_t i = 0; i < num_threads || index < stack_size; ++i) {
    Object** begin = &mark_stack_->Begin()[std::min(stack_size, index)];
    Object** end = &mark_stack_->Begin()[std::min(stack_size, index + chunk_size)];
    index += chunk_size;
    thread_pool->AddTask(self, new MarkStackChunk(thread_pool, this, begin, end));
  }
  thread_pool->StartWorkers(self);
  thread_pool->Wait(self, true, true);
  mark_stack_->Reset();
  // LOG(INFO) << "Idle wait time " << PrettyDuration(thread_pool->GetWaitTime());
  CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
}

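// The serial path below software-pipelines the scan with a small prefetch FIFO: each object
// popped from the mark stack is prefetched into the cache and only scanned roughly fifo_size
// iterations later, hiding the memory latency of the first touch of the object header.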
// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack() {
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  timings_.StartSplit("ProcessMarkStack");
  if (kParallelMarkStack && thread_pool != NULL && thread_pool->GetThreadCount() > 0) {
    ProcessMarkStackParallel();
    timings_.EndSplit();
    return;
  }

  if (kUseMarkStackPrefetch) {
    const size_t fifo_size = 4;
    const size_t fifo_mask = fifo_size - 1;
    const Object* fifo[fifo_size];
    for (size_t i = 0; i < fifo_size; ++i) {
      fifo[i] = NULL;
    }
    size_t fifo_pos = 0;
    size_t fifo_count = 0;
    for (;;) {
      const Object* obj = fifo[fifo_pos & fifo_mask];
      if (obj != NULL) {
        ScanObject(obj);
        fifo[fifo_pos & fifo_mask] = NULL;
        --fifo_count;
      }

      if (!mark_stack_->IsEmpty()) {
        const Object* obj = mark_stack_->PopBack();
        DCHECK(obj != NULL);
        fifo[fifo_pos & fifo_mask] = obj;
        __builtin_prefetch(obj);
        fifo_count++;
      }
      fifo_pos++;

      if (!fifo_count) {
        CHECK(mark_stack_->IsEmpty()) << mark_stack_->Size();
        break;
      }
    }
  } else {
    while (!mark_stack_->IsEmpty()) {
      const Object* obj = mark_stack_->PopBack();
      DCHECK(obj != NULL);
      ScanObject(obj);
    }
  }
  timings_.EndSplit();
}

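// Soft references are "biased toward saving": the (++counter) & 1 test below marks every other
// white soft referent, so in effect roughly half of the softly reachable objects survive a given
// collection instead of all being cleared at once.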
// Walks the reference list marking any references subject to the
// reference clearing policy. References with a black referent are
// removed from the list. References with white referents biased
// toward saving are blackened and also removed from the list.
void MarkSweep::PreserveSomeSoftReferences(Object** list) {
  DCHECK(list != NULL);
  Object* clear = NULL;
  size_t counter = 0;

  DCHECK(mark_stack_->IsEmpty());

  timings_.StartSplit("PreserveSomeSoftReferences");
  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent == NULL) {
      // Referent was cleared by the user during marking.
      continue;
    }
    bool is_marked = IsMarked(referent);
    if (!is_marked && ((++counter) & 1)) {
      // Referent is white and biased toward saving, mark it.
      MarkObject(referent);
      is_marked = true;
    }
    if (!is_marked) {
      // Referent is white, queue it for clearing.
      heap_->EnqueuePendingReference(ref, &clear);
    }
  }
  *list = clear;
  timings_.EndSplit();

  // Restart the mark with the newly black references added to the
  // root set.
  ProcessMarkStack();
}

inline bool MarkSweep::IsMarked(const Object* object) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  if (IsImmune(object)) {
    return true;
  }
  DCHECK(current_mark_bitmap_ != NULL);
  if (current_mark_bitmap_->HasAddress(object)) {
    return current_mark_bitmap_->Test(object);
  }
  return heap_->GetMarkBitmap()->Test(object);
}

// Unlink the reference list, clearing references with white
// referents. Cleared references registered to a reference queue are
// scheduled for appending by the heap worker thread.
void MarkSweep::ClearWhiteReferences(Object** list) {
  DCHECK(list != NULL);
  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent != NULL && !IsMarked(referent)) {
      // Referent is white, clear it.
      heap_->ClearReferenceReferent(ref);
      if (heap_->IsEnqueuable(ref)) {
        heap_->EnqueueReference(ref, &cleared_reference_list_);
      }
    }
  }
  DCHECK(*list == NULL);
}

// Enqueues finalizer references with white referents. White
// referents are blackened, moved to the zombie field, and the
// referent field is cleared.
void MarkSweep::EnqueueFinalizerReferences(Object** list) {
  DCHECK(list != NULL);
  timings_.StartSplit("EnqueueFinalizerReferences");
  MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset();
  bool has_enqueued = false;
  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent != NULL && !IsMarked(referent)) {
      MarkObject(referent);
      // If the referent is non-null the reference must be queuable.
      DCHECK(heap_->IsEnqueuable(ref));
      ref->SetFieldObject(zombie_offset, referent, false);
      heap_->ClearReferenceReferent(ref);
      heap_->EnqueueReference(ref, &cleared_reference_list_);
      has_enqueued = true;
    }
  }
  timings_.EndSplit();
  if (has_enqueued) {
    ProcessMarkStack();
  }
  DCHECK(*list == NULL);
}

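// Reference processing runs in phases: optionally preserve some soft references, clear white
// soft and weak references, enqueue finalizer references (which resurrects their referents via
// the zombie field and re-marks from them), clear the soft and weak references whose referents
// are still white after that, and finally clear phantom references.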
// Process reference class instances and schedule finalizations.
void MarkSweep::ProcessReferences(Object** soft_references, bool clear_soft,
                                  Object** weak_references,
                                  Object** finalizer_references,
                                  Object** phantom_references) {
  DCHECK(soft_references != NULL);
  DCHECK(weak_references != NULL);
  DCHECK(finalizer_references != NULL);
  DCHECK(phantom_references != NULL);

  // Unless we are in the zygote or required to clear soft references
  // with white referents, preserve some white referents.
  if (!clear_soft && !Runtime::Current()->IsZygote()) {
    PreserveSomeSoftReferences(soft_references);
  }

  timings_.StartSplit("ProcessReferences");
  // Clear all remaining soft and weak references with white
  // referents.
  ClearWhiteReferences(soft_references);
  ClearWhiteReferences(weak_references);
  timings_.EndSplit();

  // Preserve all white objects with finalize methods and schedule
  // them for finalization.
  EnqueueFinalizerReferences(finalizer_references);

  timings_.StartSplit("ProcessReferences");
  // Clear all f-reachable soft and weak references with white
  // referents.
  ClearWhiteReferences(soft_references);
  ClearWhiteReferences(weak_references);

  // Clear all phantom references with white referents.
  ClearWhiteReferences(phantom_references);

  // At this point all reference lists should be empty.
  DCHECK(*soft_references == NULL);
  DCHECK(*weak_references == NULL);
  DCHECK(*finalizer_references == NULL);
  DCHECK(*phantom_references == NULL);
  timings_.EndSplit();
}

void MarkSweep::UnBindBitmaps() {
  base::TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsDlMallocSpace()) {
      space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
      if (alloc_space->temp_bitmap_.get() != NULL) {
        // At this point, the temp_bitmap holds our old mark bitmap.
        accounting::SpaceBitmap* new_bitmap = alloc_space->temp_bitmap_.release();
        GetHeap()->GetMarkBitmap()->ReplaceBitmap(alloc_space->mark_bitmap_.get(), new_bitmap);
        CHECK_EQ(alloc_space->mark_bitmap_.release(), alloc_space->live_bitmap_.get());
        alloc_space->mark_bitmap_.reset(new_bitmap);
        DCHECK(alloc_space->temp_bitmap_.get() == NULL);
      }
    }
  }
}

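// FinishPhase publishes the collection's results: cleared references are enqueued, the heap is
// verified and resized, per-GC counters are folded into the cumulative totals, and the mark
// bitmaps and mark stack are cleared so the next collection starts from a clean slate.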
void MarkSweep::FinishPhase() {
  base::TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  // Can't enqueue references if we hold the mutator lock.
  Object* cleared_references = GetClearedReferences();
  Heap* heap = GetHeap();
  timings_.NewSplit("EnqueueClearedReferences");
  heap->EnqueueClearedReferences(&cleared_references);

  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);

  timings_.NewSplit("GrowForUtilization");
  heap->GrowForUtilization(GetGcType(), GetDurationNs());

  timings_.NewSplit("RequestHeapTrim");
  heap->RequestHeapTrim();

  // Update the cumulative statistics.
  total_time_ns_ += GetDurationNs();
  total_paused_time_ns_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(),
                                           static_cast<uint64_t>(0), std::plus<uint64_t>());
  total_freed_objects_ += GetFreedObjects();
  total_freed_bytes_ += GetFreedBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  if (kCountScannedTypes) {
    VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
             << " other=" << other_count_;
  }

  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
  }

  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
  }

  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
  }

  if (kCountClassesMarked) {
    VLOG(gc) << "Classes marked " << classes_marked_;
  }

  if (kCountJavaLangRefs) {
    VLOG(gc) << "References scanned " << reference_count_;
  }

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      space->GetMarkBitmap()->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();
}

}  // namespace collector
}  // namespace gc
}  // namespace art