/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "monitor.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::ArtField;
using ::art::mirror::Class;
using ::art::mirror::Object;
using ::art::mirror::ObjectArray;

namespace art {
namespace gc {
namespace collector {

// Performance options.
static const bool kParallelMarkStack = true;
static const bool kDisableFinger = true;  // TODO: Fix, bit rotten.
static const bool kUseMarkStackPrefetch = true;
static const size_t kSweepArrayChunkFreeSize = 1024;

// Profiling and information flags.
static const bool kCountClassesMarked = false;
static const bool kProfileLargeObjects = false;
static const bool kMeasureOverhead = false;
static const bool kCountTasks = false;
static const bool kCountJavaLangRefs = false;

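// An immune space is one this collection never reclaims (e.g. image spaces): its objects are
// treated as always marked, so marking can cheaply skip any reference that falls inside
// [immune_begin_, immune_end_). Keeping the region as a single contiguous interval works because
// Heap::AddContinuousSpace keeps continuous spaces sorted by address.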
void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    BindLiveToMarkBitmap(space);
  }

  // Add the space to the immune region.
  if (immune_begin_ == NULL) {
    DCHECK(immune_end_ == NULL);
    SetImmuneRange(reinterpret_cast<Object*>(space->Begin()),
                   reinterpret_cast<Object*>(space->End()));
  } else {
    const space::ContinuousSpace* prev_space = nullptr;
    // Find out if the previous space is immune.
    for (space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
      if (cur_space == space) {
        break;
      }
      prev_space = cur_space;
    }

    // If the previous space was immune, then extend the immune region. Relies on continuous
    // spaces being sorted by Heap::AddContinuousSpace.
    if (prev_space != NULL &&
        immune_begin_ <= reinterpret_cast<Object*>(prev_space->Begin()) &&
        immune_end_ >= reinterpret_cast<Object*>(prev_space->End())) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
    }
  }
}

void MarkSweep::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);

  // Mark all of the spaces we never collect as immune.
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      ImmuneSpace(space);
    }
  }
  timings_.EndSplit();
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       (is_concurrent ? "concurrent mark sweep": "mark sweep")),
      current_mark_bitmap_(NULL),
      java_lang_Class_(NULL),
      mark_stack_(NULL),
      immune_begin_(NULL),
      immune_end_(NULL),
      soft_reference_list_(NULL),
      weak_reference_list_(NULL),
      finalizer_reference_list_(NULL),
      phantom_reference_list_(NULL),
      cleared_reference_list_(NULL),
      gc_barrier_(new Barrier(0)),
      large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
      mark_stack_expand_lock_("mark sweep mark stack expand lock"),
      is_concurrent_(is_concurrent),
      clear_soft_references_(false) {
}

void MarkSweep::InitializePhase() {
  timings_.Reset();
  base::TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = GetHeap()->mark_stack_.get();
  DCHECK(mark_stack_ != NULL);
  SetImmuneRange(NULL, NULL);
  soft_reference_list_ = NULL;
  weak_reference_list_ = NULL;
  finalizer_reference_list_ = NULL;
  phantom_reference_list_ = NULL;
  cleared_reference_list_ = NULL;
  freed_bytes_ = 0;
  freed_objects_ = 0;
  class_count_ = 0;
  array_count_ = 0;
  other_count_ = 0;
  large_object_test_ = 0;
  large_object_mark_ = 0;
  classes_marked_ = 0;
  overhead_time_ = 0;
  work_chunks_created_ = 0;
  work_chunks_deleted_ = 0;
  reference_count_ = 0;
  java_lang_Class_ = Class::GetJavaLangClass();
  CHECK(java_lang_Class_ != NULL);

  FindDefaultMarkBitmap();

  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
}

void MarkSweep::ProcessReferences(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_,
                    &finalizer_reference_list_, &phantom_reference_list_);
}

bool MarkSweep::HandleDirtyObjectsPhase() {
  base::TimingLogger::ScopedSplit split("HandleDirtyObjectsPhase", &timings_);
  Thread* self = Thread::Current();
  accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
  Locks::mutator_lock_->AssertExclusiveHeld(self);

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Re-mark root set.
    ReMarkRoots();

    // Scan dirty objects, this is only required if we are not doing concurrent GC.
    RecursiveMarkDirtyObjects(accounting::CardTable::kCardDirty);
  }

  ProcessReferences(self);

  // Only need to do this if we have the card mark verification on, and only during concurrent GC.
  if (GetHeap()->verify_missing_card_marks_) {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // This second sweep makes sure that we don't have any objects in the live stack which point to
    // freed objects. Such objects cause problems since their references may point at previously
    // freed memory.
    SweepArray(allocation_stack, false);
  }
  return true;
}

bool MarkSweep::IsConcurrent() const {
  return is_concurrent_;
}

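// Phase ordering (driven by GarbageCollector::Run): roughly InitializePhase, then MarkingPhase
// (running concurrently with mutators when is_concurrent_), then a pause for
// HandleDirtyObjectsPhase to re-mark roots and dirty cards, then ReclaimPhase to sweep.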
void MarkSweep::MarkingPhase() {
  base::TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  Heap* heap = GetHeap();
  Thread* self = Thread::Current();

  BindBitmaps();
  FindDefaultMarkBitmap();

  // Process dirty cards and add dirty cards to mod union tables.
  heap->ProcessCards(timings_);

  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  heap->SwapStacks();

  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    MarkRoots();
  } else {
    MarkRootsCheckpoint(self);
    MarkNonThreadRoots();
  }
  MarkConcurrentRoots();

  heap->UpdateAndMarkModUnion(this, timings_, GetGcType());
  MarkReachableObjects();
}

void MarkSweep::MarkReachableObjects() {
  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
  // knowing that new allocations won't be marked as live.
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStack(heap_->alloc_space_->GetLiveBitmap(),
                        heap_->large_object_space_->GetLiveObjects(),
                        live_stack);
  live_stack->Reset();
  timings_.EndSplit();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  base::TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  Thread* self = Thread::Current();

  if (!IsConcurrent()) {
    base::TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
    ProcessReferences(self);
  } else {
    base::TimingLogger::ScopedSplit split("UnMarkAllocStack", &timings_);
    accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // The allocation stack contains things allocated since the start of the GC. These may have
    // been marked during this GC meaning they won't be eligible for reclaiming in the next
    // sticky GC. Remove these objects from the mark bitmaps so that they will be eligible for
    // sticky collection.
    // There is a race here which is safely handled. Another thread such as the hprof could
    // have flushed the alloc stack after we resumed the threads. This is safe however, since
    // resetting the allocation stack zeros it out with madvise. This means that we will either
    // read NULLs or attempt to unmark a newly allocated object which will not be marked in the
    // first place.
    mirror::Object** end = allocation_stack->End();
    for (mirror::Object** it = allocation_stack->Begin(); it != end; ++it) {
      Object* obj = *it;
      if (obj != NULL) {
        UnMarkObjectNonNull(obj);
      }
    }
  }

  // Before freeing anything, let's verify the heap.
  if (kIsDebugBuild) {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    VerifyImageRoots();
  }
  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Reclaim unmarked objects.
    Sweep(false);

    // Swap the live and mark bitmaps for each space which we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();

    // Unbind the live and mark bitmaps.
    UnBindBitmaps();
  }
}

void MarkSweep::SetImmuneRange(Object* begin, Object* end) {
  immune_begin_ = begin;
  immune_end_ = end;
}

void MarkSweep::FindDefaultMarkBitmap() {
  base::TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_mark_bitmap_ = space->GetMarkBitmap();
      CHECK(current_mark_bitmap_ != NULL);
      return;
    }
  }
  GetHeap()->DumpSpaces();
  LOG(FATAL) << "Could not find a default mark bitmap";
}

void MarkSweep::ExpandMarkStack() {
  // Rare case, no need to have Thread::Current be a parameter.
  MutexLock mu(Thread::Current(), mark_stack_expand_lock_);
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  mark_stack_->Resize(mark_stack_->Capacity() * 2);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

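// Parallel marking: claim the object via MarkObjectParallel, then push it with the lock-free
// AtomicPushBack. The only way the push can fail is a full mark stack, in which case we grow the
// stack under mark_stack_expand_lock_ and retry.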
inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj) {
  DCHECK(obj != NULL);
  if (MarkObjectParallel(obj)) {
    while (UNLIKELY(!mark_stack_->AtomicPushBack(const_cast<Object*>(obj)))) {
      // Only reason a push can fail is that the mark stack is full.
      ExpandMarkStack();
    }
  }
}

inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) {
  DCHECK(!IsImmune(obj));
  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, false);
      return;
    }
  }

  DCHECK(object_bitmap->HasAddress(obj));
  object_bitmap->Clear(obj);
}

inline void MarkSweep::MarkObjectNonNull(const Object* obj) {
  DCHECK(obj != NULL);

  if (IsImmune(obj)) {
    DCHECK(IsMarked(obj));
    return;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, true);
      return;
    }
  }

  // This object was not previously marked.
  if (!object_bitmap->Test(obj)) {
    object_bitmap->Set(obj);
    // Do we need to expand the mark stack?
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(const_cast<Object*>(obj));
  }
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool MarkSweep::MarkLargeObject(const Object* obj, bool set) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
  if (kProfileLargeObjects) {
    ++large_object_test_;
  }
  if (UNLIKELY(!large_objects->Test(obj))) {
    if (!large_object_space->Contains(obj)) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      VerifyRoots();
      LOG(FATAL) << "Can't mark bad root";
    }
    if (kProfileLargeObjects) {
      ++large_object_mark_;
    }
    if (set) {
      large_objects->Set(obj);
    } else {
      large_objects->Clear(obj);
    }
    return true;
  }
  return false;
}

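// Thread-safe marking used by parallel root marking: immune and already-marked objects are
// rejected without locking, continuous-space objects are claimed with an atomic test-and-set on
// the mark bitmap, and only the rare large-object path falls back to large_object_lock_.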
inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != NULL);

  if (IsImmune(obj)) {
    DCHECK(IsMarked(obj));
    return false;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (new_bitmap != NULL) {
      object_bitmap = new_bitmap;
    } else {
      // TODO: Remove the Thread::Current here?
      // TODO: Convert this to some kind of atomic marking?
      MutexLock mu(Thread::Current(), large_object_lock_);
      return MarkLargeObject(obj, true);
    }
  }

  // Return true if the object was not previously marked.
  return !object_bitmap->AtomicTestAndSet(obj);
}

// Used to mark objects when recursing. Recursion is done by moving
// the finger across the bitmaps in address order and marking child
// objects. Any newly-marked objects whose addresses are lower than
// the finger won't be visited by the bitmap scan, so those objects
// need to be added to the mark stack.
void MarkSweep::MarkObject(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj);
  }
}

void MarkSweep::MarkRoot(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj);
  }
}

void MarkSweep::MarkRootParallelCallback(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNullParallel(root);
}

void MarkSweep::MarkObjectCallback(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNull(root);
}

void MarkSweep::ReMarkObjectVisitor(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNull(root);
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) {
  // See if the root is on any space bitmap.
  if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == NULL) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root;
      if (visitor != NULL) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

// Marks all objects in the root set.
void MarkSweep::MarkRoots() {
  timings_.StartSplit("MarkRoots");
  Runtime::Current()->VisitNonConcurrentRoots(MarkObjectCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkNonThreadRoots() {
  timings_.StartSplit("MarkNonThreadRoots");
  Runtime::Current()->VisitNonThreadRoots(MarkObjectCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkConcurrentRoots() {
  timings_.StartSplit("MarkConcurrentRoots");
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(MarkObjectCallback, this, false, true);
  timings_.EndSplit();
}

class CheckObjectVisitor {
 public:
  explicit CheckObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  void operator()(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
    }
    mark_sweep_->CheckReference(obj, ref, offset, is_static);
  }

 private:
  MarkSweep* const mark_sweep_;
};

void MarkSweep::CheckObject(const Object* obj) {
  DCHECK(obj != NULL);
  CheckObjectVisitor visitor(this);
  VisitObjectReferences(obj, visitor);
}

void MarkSweep::VerifyImageRootVisitor(Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  DCHECK(mark_sweep->heap_->GetMarkBitmap()->Test(root));
  mark_sweep->CheckObject(root);
}

void MarkSweep::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
  CHECK(space->IsDlMallocSpace());
  space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = alloc_space->mark_bitmap_.release();
  GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
  alloc_space->temp_bitmap_.reset(mark_bitmap);
  alloc_space->mark_bitmap_.reset(live_bitmap);
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  // TODO: Fix this when annotalysis works with visitors.
  void operator()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

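// Scans objects on dirty cards. The card table maps each small fixed-size chunk of the heap to a
// byte that mutators mark on every reference store, so scanning cards at or above minimum_age
// revisits only the memory written since the cards were last aged or cleared (see
// accounting::CardTable for the exact card size).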
void MarkSweep::ScanGrayObjects(byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ScanObjectVisitor visitor(this);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    switch (space->GetGcRetentionPolicy()) {
      case space::kGcRetentionPolicyNeverCollect:
        timings_.StartSplit("ScanGrayImageSpaceObjects");
        break;
      case space::kGcRetentionPolicyFullCollect:
        timings_.StartSplit("ScanGrayZygoteSpaceObjects");
        break;
      case space::kGcRetentionPolicyAlwaysCollect:
        timings_.StartSplit("ScanGrayAllocSpaceObjects");
        break;
    }
    byte* begin = space->Begin();
    byte* end = space->End();
    // Image spaces are handled properly since live == marked for them.
    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    card_table->Scan(mark_bitmap, begin, end, visitor, minimum_age);
    timings_.EndSplit();
  }
}

class CheckBitmapVisitor {
 public:
  explicit CheckBitmapVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  void operator()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
    }
    DCHECK(obj != NULL);
    mark_sweep_->CheckObject(obj);
  }

 private:
  MarkSweep* mark_sweep_;
};

void MarkSweep::VerifyImageRoots() {
  // Ensures that all the references inside the image space point to objects which are either in
  // the image space or marked objects in the alloc space.
  timings_.StartSplit("VerifyImageRoots");
  CheckBitmapVisitor visitor(this);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsImageSpace()) {
      space::ImageSpace* image_space = space->AsImageSpace();
      uintptr_t begin = reinterpret_cast<uintptr_t>(image_space->Begin());
      uintptr_t end = reinterpret_cast<uintptr_t>(image_space->End());
      accounting::SpaceBitmap* live_bitmap = image_space->GetLiveBitmap();
      DCHECK(live_bitmap != NULL);
      live_bitmap->VisitMarkedRange(begin, end, visitor);
    }
  }
  timings_.EndSplit();
}

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  base::TimingLogger::ScopedSplit split("RecursiveMark", &timings_);
  // RecursiveMark will build the lists of known instances of the Reference classes.
  // See DelayReferenceReferent for details.
  CHECK(soft_reference_list_ == NULL);
  CHECK(weak_reference_list_ == NULL);
  CHECK(finalizer_reference_list_ == NULL);
  CHECK(phantom_reference_list_ == NULL);
  CHECK(cleared_reference_list_ == NULL);

  const bool partial = GetGcType() == kGcTypePartial;
  ScanObjectVisitor scan_visitor(this);
  if (!kDisableFinger) {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_mark_bitmap_ = space->GetMarkBitmap();
        if (current_mark_bitmap_ == NULL) {
          GetHeap()->DumpSpaces();
          LOG(FATAL) << "invalid bitmap";
        }
        // This function does not handle heap end increasing, so we must use the space end.
        uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
        uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
        current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
      }
    }
  }
  ProcessMarkStack();
}

bool MarkSweep::IsMarkedCallback(const Object* object, void* arg) {
  return
      reinterpret_cast<MarkSweep*>(arg)->IsMarked(object) ||
      !reinterpret_cast<MarkSweep*>(arg)->GetHeap()->GetLiveBitmap()->Test(object);
}

void MarkSweep::RecursiveMarkDirtyObjects(byte minimum_age) {
  ScanGrayObjects(minimum_age);
  ProcessMarkStack();
}

void MarkSweep::ReMarkRoots() {
  timings_.StartSplit("ReMarkRoots");
  Runtime::Current()->VisitRoots(ReMarkObjectVisitor, this, true, true);
  timings_.EndSplit();
}

void MarkSweep::SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg) {
  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
  MutexLock mu(Thread::Current(), vm->weak_globals_lock);
  for (const Object** entry : vm->weak_globals) {
    if (!is_marked(*entry, arg)) {
      *entry = kClearedJniWeakGlobal;
    }
  }
}

struct ArrayMarkedCheck {
  accounting::ObjectStack* live_stack;
  MarkSweep* mark_sweep;
};

// Either marked or not live.
bool MarkSweep::IsMarkedArrayCallback(const Object* object, void* arg) {
  ArrayMarkedCheck* array_check = reinterpret_cast<ArrayMarkedCheck*>(arg);
  if (array_check->mark_sweep->IsMarked(object)) {
    return true;
  }
  accounting::ObjectStack* live_stack = array_check->live_stack;
  return std::find(live_stack->Begin(), live_stack->End(), object) == live_stack->End();
}

void MarkSweep::SweepSystemWeaksArray(accounting::ObjectStack* allocations) {
  Runtime* runtime = Runtime::Current();
  // The sweeping callbacks clear an entry when !is_marked(entry), but we only want to clear
  // entries that are !IsMarked && IsLive. So the callback computes !(!IsMarked && IsLive),
  // which is equal to (IsMarked || !IsLive), or (IsLive || !IsMarked) with swapped bitmaps.

  timings_.StartSplit("SweepSystemWeaksArray");
  ArrayMarkedCheck visitor;
  visitor.live_stack = allocations;
  visitor.mark_sweep = this;
  runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedArrayCallback, &visitor);
  runtime->GetMonitorList()->SweepMonitorList(IsMarkedArrayCallback, &visitor);
  SweepJniWeakGlobals(IsMarkedArrayCallback, &visitor);
  timings_.EndSplit();
}

void MarkSweep::SweepSystemWeaks() {
  Runtime* runtime = Runtime::Current();
  // As above: the callback computes !(!IsMarked && IsLive), which is equal to
  // (IsMarked || !IsLive), or (IsLive || !IsMarked) with swapped bitmaps.
  timings_.StartSplit("SweepSystemWeaks");
  runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedCallback, this);
  runtime->GetMonitorList()->SweepMonitorList(IsMarkedCallback, this);
  SweepJniWeakGlobals(IsMarkedCallback, this);
  timings_.EndSplit();
}

bool MarkSweep::VerifyIsLiveCallback(const Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return "marked".
  return true;
}

void MarkSweep::VerifyIsLive(const Object* obj) {
  Heap* heap = GetHeap();
  if (!heap->GetLiveBitmap()->Test(obj)) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->GetLiveObjects()->Test(obj)) {
      if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
          heap->allocation_stack_->End()) {
        // Object not found!
        heap->DumpSpaces();
        LOG(FATAL) << "Found dead object " << obj;
      }
    }
  }
}

void MarkSweep::VerifySystemWeaks() {
  Runtime* runtime = Runtime::Current();
  // Verify system weaks, uses a special IsMarked callback which always returns true.
  runtime->GetInternTable()->SweepInternTableWeaks(VerifyIsLiveCallback, this);
  runtime->GetMonitorList()->SweepMonitorList(VerifyIsLiveCallback, this);

  JavaVMExt* vm = runtime->GetJavaVM();
  MutexLock mu(Thread::Current(), vm->weak_globals_lock);
  for (const Object** entry : vm->weak_globals) {
    VerifyIsLive(*entry);
  }
}

struct SweepCallbackContext {
  MarkSweep* mark_sweep;
  space::AllocSpace* space;
  Thread* self;
};

class CheckpointMarkThreadRoots : public Closure {
 public:
  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* mark_sweep_;
};

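// Checkpoint-based root marking: instead of suspending every mutator, each thread runs the
// closure above at its next suspend point to mark its own stack roots, then passes the barrier.
// The GC thread releases its locks, waits on gc_barrier_ until all checkpointed threads have
// passed, and reacquires the locks before continuing.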
void MarkSweep::MarkRootsCheckpoint(Thread* self) {
  CheckpointMarkThreadRoots check_point(this);
  timings_.StartSplit("MarkRootsCheckpoint");
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the checkpoint be run on all threads, returning a count of the threads that
  // must run through the barrier, including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // TODO: optimize to not release locks when there are no threads to wait for.
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun);
  CHECK_EQ(old_state, kWaitingPerformingGc);
  gc_barrier_->Increment(self, barrier_count);
  self->SetState(kWaitingPerformingGc);
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
  timings_.EndSplit();
}

void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  MarkSweep* mark_sweep = context->mark_sweep;
  Heap* heap = mark_sweep->GetHeap();
  space::AllocSpace* space = context->space;
  Thread* self = context->self;
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
  // Use a bulk free that merges consecutive objects before freeing, or free per object?
  // Documentation suggests better free performance with merging, but this may be at the expense
  // of allocation.
  size_t freed_objects = num_ptrs;
  // AllocSpace::FreeList clears the value in ptrs, so perform after clearing the live bit.
  size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs);
  heap->RecordFree(freed_objects, freed_bytes);
  mark_sweep->freed_objects_.fetch_add(freed_objects);
  mark_sweep->freed_bytes_.fetch_add(freed_bytes);
}

void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
  Heap* heap = context->mark_sweep->GetHeap();
  // We don't free any actual memory to avoid dirtying the shared zygote pages.
  for (size_t i = 0; i < num_ptrs; ++i) {
    Object* obj = static_cast<Object*>(ptrs[i]);
    heap->GetLiveBitmap()->Clear(obj);
    heap->GetCardTable()->MarkCard(obj);
  }
}

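// Sweeps the allocation stack directly: every object allocated since the last GC that never got
// marked is freed. Unmarked alloc-space objects are batched and released up to
// kSweepArrayChunkFreeSize at a time through DlMallocSpace::FreeList, which amortizes the cost of
// each free call; unmarked large objects are freed individually.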
void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  size_t freed_bytes = 0;
  space::DlMallocSpace* space = heap_->GetAllocSpace();

  // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark
  // bitmap, resulting in occasional frees of Weaks which are still in use.
  SweepSystemWeaksArray(allocations);

  timings_.StartSplit("Process allocation stack");
  // Newly allocated objects MUST be in the alloc space and those are the only objects which we are
  // going to free.
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(live_bitmap, mark_bitmap);
    std::swap(large_live_objects, large_mark_objects);
  }

  size_t freed_objects = 0;
  size_t freed_large_objects = 0;
  size_t count = allocations->Size();
  Object** objects = const_cast<Object**>(allocations->Begin());
  Object** out = objects;
  Object** objects_to_chunk_free = out;

  // Empty the allocation stack.
  Thread* self = Thread::Current();
  for (size_t i = 0; i < count; ++i) {
    Object* obj = objects[i];
    // There should only be objects in the AllocSpace/LargeObjectSpace in the allocation stack.
    if (LIKELY(mark_bitmap->HasAddress(obj))) {
      if (!mark_bitmap->Test(obj)) {
        // Don't bother un-marking since we clear the mark bitmap anyways.
        *(out++) = obj;
        // Free objects in chunks.
        DCHECK_GE(out, objects_to_chunk_free);
        DCHECK_LE(static_cast<size_t>(out - objects_to_chunk_free), kSweepArrayChunkFreeSize);
        if (static_cast<size_t>(out - objects_to_chunk_free) == kSweepArrayChunkFreeSize) {
          timings_.StartSplit("FreeList");
          size_t chunk_freed_objects = out - objects_to_chunk_free;
          freed_objects += chunk_freed_objects;
          freed_bytes += space->FreeList(self, chunk_freed_objects, objects_to_chunk_free);
          objects_to_chunk_free = out;
          timings_.EndSplit();
        }
      }
    } else if (!large_mark_objects->Test(obj)) {
      ++freed_large_objects;
      freed_bytes += large_object_space->Free(self, obj);
    }
  }
  // Free the remaining objects in chunks.
  DCHECK_GE(out, objects_to_chunk_free);
  DCHECK_LE(static_cast<size_t>(out - objects_to_chunk_free), kSweepArrayChunkFreeSize);
  if (out - objects_to_chunk_free > 0) {
    timings_.StartSplit("FreeList");
    size_t chunk_freed_objects = out - objects_to_chunk_free;
    freed_objects += chunk_freed_objects;
    freed_bytes += space->FreeList(self, chunk_freed_objects, objects_to_chunk_free);
    timings_.EndSplit();
  }
  CHECK_EQ(count, allocations->Size());
  timings_.EndSplit();

  timings_.StartSplit("RecordFree");
  VLOG(heap) << "Freed " << freed_objects << "/" << count
             << " objects with size " << PrettySize(freed_bytes);
  heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes);
  freed_objects_.fetch_add(freed_objects);
  freed_bytes_.fetch_add(freed_bytes);
  timings_.EndSplit();

  timings_.StartSplit("ResetStack");
  allocations->Reset();
  timings_.EndSplit();
}

Ian Rogers1d54e732013-05-02 21:10:01 -0700954void MarkSweep::Sweep(bool swap_bitmaps) {
Mathieu Chartierb43b7d42012-06-19 13:15:09 -0700955 DCHECK(mark_stack_->IsEmpty());
Anwar Ghuloum46543222013-08-12 09:28:42 -0700956 base::TimingLogger::ScopedSplit("Sweep", &timings_);
Mathieu Chartierb43b7d42012-06-19 13:15:09 -0700957
Mathieu Chartier357e9be2012-08-01 11:00:14 -0700958 // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark
959 // bitmap, resulting in occasional frees of Weaks which are still in use.
Mathieu Chartier7469ebf2012-09-24 16:28:36 -0700960 SweepSystemWeaks();
Mathieu Chartier357e9be2012-08-01 11:00:14 -0700961
Ian Rogers1d54e732013-05-02 21:10:01 -0700962 const bool partial = (GetGcType() == kGcTypePartial);
Elliott Hughesb3bd5f02012-03-08 21:05:27 -0800963 SweepCallbackContext scc;
Mathieu Chartier357e9be2012-08-01 11:00:14 -0700964 scc.mark_sweep = this;
Ian Rogers50b35e22012-10-04 10:09:15 -0700965 scc.self = Thread::Current();
Mathieu Chartier02e25112013-08-14 16:14:24 -0700966 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
Ian Rogers1d54e732013-05-02 21:10:01 -0700967 // We always sweep always collect spaces.
968 bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect);
969 if (!partial && !sweep_space) {
970 // We sweep full collect spaces when the GC isn't a partial GC (ie its full).
971 sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
972 }
973 if (sweep_space) {
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700974 uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
975 uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
Ian Rogers1d54e732013-05-02 21:10:01 -0700976 scc.space = space->AsDlMallocSpace();
977 accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
978 accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
Mathieu Chartierfd678be2012-08-30 14:50:54 -0700979 if (swap_bitmaps) {
980 std::swap(live_bitmap, mark_bitmap);
981 }
Ian Rogers1d54e732013-05-02 21:10:01 -0700982 if (!space->IsZygoteSpace()) {
Anwar Ghuloum46543222013-08-12 09:28:42 -0700983 base::TimingLogger::ScopedSplit split("SweepAllocSpace", &timings_);
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700984 // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
Ian Rogers1d54e732013-05-02 21:10:01 -0700985 accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
986 &SweepCallback, reinterpret_cast<void*>(&scc));
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700987 } else {
Anwar Ghuloum46543222013-08-12 09:28:42 -0700988 base::TimingLogger::ScopedSplit split("SweepZygote", &timings_);
Ian Rogers1d54e732013-05-02 21:10:01 -0700989        // Zygote sweep takes care of dirtying cards and clearing live bits; it does not free
991        // actual memory.
991 accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
992 &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700993 }
Carl Shapiro58551df2011-07-24 03:09:51 -0700994 }
995 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -0800996
997 SweepLargeObjects(swap_bitmaps);
Carl Shapiro58551df2011-07-24 03:09:51 -0700998}
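
// Rough shape of the sweep callback handed to SweepWalk above (a sketch; the
// real callback is defined elsewhere in this file and may differ in detail):
//
//   void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
//     SweepCallbackContext* context = reinterpret_cast<SweepCallbackContext*>(arg);
//     // Batch-free every object that was live but not marked.
//     context->space->FreeList(context->self, num_ptrs, ptrs);
//   }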
999
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001000void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
Anwar Ghuloum46543222013-08-12 09:28:42 -07001001  base::TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001002  // Sweep large objects.
Ian Rogers1d54e732013-05-02 21:10:01 -07001003 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
1004 accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
1005 accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001006 if (swap_bitmaps) {
1007 std::swap(large_live_objects, large_mark_objects);
1008 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001009 accounting::SpaceSetMap::Objects& live_objects = large_live_objects->GetObjects();
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001010 // O(n*log(n)) but hopefully there are not too many large objects.
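  // (The log(n) term assumes Test() below is a lookup in an ordered set of the
  // marked large objects; the loop itself visits each of the n live objects.)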
1011 size_t freed_objects = 0;
Mathieu Chartier2fde5332012-09-14 14:51:54 -07001012 size_t freed_bytes = 0;
Ian Rogers50b35e22012-10-04 10:09:15 -07001013 Thread* self = Thread::Current();
Mathieu Chartier02e25112013-08-14 16:14:24 -07001014 for (const Object* obj : live_objects) {
1015 if (!large_mark_objects->Test(obj)) {
1016 freed_bytes += large_object_space->Free(self, const_cast<Object*>(obj));
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001017 ++freed_objects;
1018 }
1019 }
Mathieu Chartier4b95e8f2013-07-15 16:32:50 -07001020 freed_objects_.fetch_add(freed_objects);
1021 freed_bytes_.fetch_add(freed_bytes);
Mathieu Chartier2fde5332012-09-14 14:51:54 -07001022 GetHeap()->RecordFree(freed_objects, freed_bytes);
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001023}
1024
Mathieu Chartierb43b7d42012-06-19 13:15:09 -07001025void MarkSweep::CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) {
Mathieu Chartier02e25112013-08-14 16:14:24 -07001026 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001027 if (space->IsDlMallocSpace() && space->Contains(ref)) {
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001028 DCHECK(IsMarked(obj));
Mathieu Chartierb43b7d42012-06-19 13:15:09 -07001029
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001030 bool is_marked = IsMarked(ref);
1031 if (!is_marked) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001032 LOG(INFO) << *space;
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001033 LOG(WARNING) << (is_static ? "Static ref'" : "Instance ref'") << PrettyTypeOf(ref)
1034 << "' (" << reinterpret_cast<const void*>(ref) << ") in '" << PrettyTypeOf(obj)
1035 << "' (" << reinterpret_cast<const void*>(obj) << ") at offset "
1036 << reinterpret_cast<void*>(offset.Int32Value()) << " wasn't marked";
Mathieu Chartier262e5ff2012-06-01 17:35:38 -07001037
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001038 const Class* klass = is_static ? obj->AsClass() : obj->GetClass();
1039 DCHECK(klass != NULL);
Brian Carlstromea46f952013-07-30 01:26:50 -07001040 const ObjectArray<ArtField>* fields = is_static ? klass->GetSFields() : klass->GetIFields();
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001041 DCHECK(fields != NULL);
1042 bool found = false;
1043 for (int32_t i = 0; i < fields->GetLength(); ++i) {
Brian Carlstromea46f952013-07-30 01:26:50 -07001044 const ArtField* cur = fields->Get(i);
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001045 if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
1046 LOG(WARNING) << "Field referencing the alloc space was " << PrettyField(cur);
1047 found = true;
1048 break;
1049 }
1050 }
1051 if (!found) {
1052 LOG(WARNING) << "Could not find field in object alloc space with offset " << offset.Int32Value();
1053 }
Mathieu Chartier262e5ff2012-06-01 17:35:38 -07001054
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001055 bool obj_marked = heap_->GetCardTable()->IsDirty(obj);
1056 if (!obj_marked) {
1057 LOG(WARNING) << "Object '" << PrettyTypeOf(obj) << "' "
1058 << "(" << reinterpret_cast<const void*>(obj) << ") contains references to "
1059 << "the alloc space, but wasn't card marked";
Mathieu Chartier262e5ff2012-06-01 17:35:38 -07001060 }
1061 }
Ian Rogers5d76c432011-10-31 21:42:49 -07001062 }
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001063 break;
Ian Rogers5d76c432011-10-31 21:42:49 -07001064 }
1065}
1066
Carl Shapiro69759ea2011-07-21 18:13:35 -07001067// Process the "referent" field in a java.lang.ref.Reference. If the
1068// referent has not yet been marked, put it on the appropriate list in
1069 // the heap for later processing.
1070void MarkSweep::DelayReferenceReferent(Object* obj) {
1071 DCHECK(obj != NULL);
Brian Carlstrom1f870082011-08-23 16:02:11 -07001072 Class* klass = obj->GetClass();
1073 DCHECK(klass != NULL);
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001074 DCHECK(klass->IsReferenceClass());
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001075 Object* pending = obj->GetFieldObject<Object*>(heap_->GetReferencePendingNextOffset(), false);
1076 Object* referent = heap_->GetReferenceReferent(obj);
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001077 if (kCountJavaLangRefs) {
1078 ++reference_count_;
1079 }
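  // A Reference with a non-null pendingNext field is already on some list, so
  // the check below enqueues each reference with an unmarked referent only once.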
Carl Shapiro69759ea2011-07-21 18:13:35 -07001080 if (pending == NULL && referent != NULL && !IsMarked(referent)) {
Brian Carlstrom4873d462011-08-21 15:23:39 -07001081 Object** list = NULL;
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001082 if (klass->IsSoftReferenceClass()) {
Carl Shapiro69759ea2011-07-21 18:13:35 -07001083 list = &soft_reference_list_;
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001084 } else if (klass->IsWeakReferenceClass()) {
Carl Shapiro69759ea2011-07-21 18:13:35 -07001085 list = &weak_reference_list_;
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001086 } else if (klass->IsFinalizerReferenceClass()) {
Carl Shapiro69759ea2011-07-21 18:13:35 -07001087 list = &finalizer_reference_list_;
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001088 } else if (klass->IsPhantomReferenceClass()) {
Carl Shapiro69759ea2011-07-21 18:13:35 -07001089 list = &phantom_reference_list_;
1090 }
Brian Carlstrom0796af02011-10-12 14:31:45 -07001091 DCHECK(list != NULL) << PrettyClass(klass) << " " << std::hex << klass->GetAccessFlags();
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001092 // TODO: One lock per list?
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001093 heap_->EnqueuePendingReference(obj, list);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001094 }
1095}
1096
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001097void MarkSweep::ScanRoot(const Object* obj) {
1098 ScanObject(obj);
1099}
1100
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001101class MarkObjectVisitor {
1102 public:
Brian Carlstrom93ba8932013-07-17 21:31:49 -07001103 explicit MarkObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001104
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001105  // TODO: Fix when annotalysis works with visitors.
Brian Carlstromdf629502013-07-17 22:39:56 -07001106 void operator()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */,
1107 bool /* is_static */) const
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001108 NO_THREAD_SAFETY_ANALYSIS {
1109 if (kDebugLocking) {
1110 Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
1111 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
1112 }
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001113 mark_sweep_->MarkObject(ref);
1114 }
1115
1116 private:
1117 MarkSweep* const mark_sweep_;
1118};
1119
Carl Shapiro69759ea2011-07-21 18:13:35 -07001120// Scans an object reference. Determines the type of the reference
1121// and dispatches to a specialized scanning routine.
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001122void MarkSweep::ScanObject(const Object* obj) {
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001123 MarkObjectVisitor visitor(this);
1124 ScanObjectVisit(obj, visitor);
1125}
1126
1127class MarkStackChunk : public Task {
Ian Rogers1d54e732013-05-02 21:10:01 -07001128 public:
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001129 MarkStackChunk(ThreadPool* thread_pool, MarkSweep* mark_sweep, Object** begin, Object** end)
1130 : mark_sweep_(mark_sweep),
1131 thread_pool_(thread_pool),
1132 index_(0),
1133 length_(0),
1134 output_(NULL) {
1135 length_ = end - begin;
1136 if (begin != end) {
1137 // Cost not significant since we only do this for the initial set of mark stack chunks.
1138 memcpy(data_, begin, length_ * sizeof(*begin));
1139 }
1140 if (kCountTasks) {
1141 ++mark_sweep_->work_chunks_created_;
1142 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001143 }
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001144
1145 ~MarkStackChunk() {
1146 DCHECK(output_ == NULL || output_->length_ == 0);
1147 DCHECK_GE(index_, length_);
1148 delete output_;
1149 if (kCountTasks) {
1150 ++mark_sweep_->work_chunks_deleted_;
1151 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07001152 }
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001153
1154 MarkSweep* const mark_sweep_;
1155 ThreadPool* const thread_pool_;
1156 static const size_t max_size = 1 * KB;
1157 // Index of which object we are scanning. Only needs to be atomic if we are doing work stealing.
1158 size_t index_;
1159 // Input / output mark stack. We add newly marked references to data_ until length reaches
1160  // max_size. This is an optimization so that fewer tasks are created.
1161 // TODO: Investigate using a bounded buffer FIFO.
1162 Object* data_[max_size];
1163 // How many elements in data_ we need to scan.
1164 size_t length_;
1165  // Output block; newly marked references get added to the output block so that another thread can
1166 // scan them.
1167 MarkStackChunk* output_;
1168
1169 class MarkObjectParallelVisitor {
1170 public:
Brian Carlstrom93ba8932013-07-17 21:31:49 -07001171 explicit MarkObjectParallelVisitor(MarkStackChunk* chunk_task) : chunk_task_(chunk_task) {}
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001172
Brian Carlstromdf629502013-07-17 22:39:56 -07001173 void operator()(const Object* /* obj */, const Object* ref,
1174 const MemberOffset& /* offset */, bool /* is_static */) const {
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001175 if (ref != NULL && chunk_task_->mark_sweep_->MarkObjectParallel(ref)) {
1176 chunk_task_->MarkStackPush(ref);
1177 }
1178 }
1179
1180 private:
1181 MarkStackChunk* const chunk_task_;
1182 };
1183
1184 // Push an object into the block.
1185  // Don't need to use an atomic ++ since only one thread writes to an output block at any
1186 // given time.
1187 void Push(Object* obj) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001188 CHECK(obj != NULL);
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001189 data_[length_++] = obj;
1190 }
1191
1192 void MarkStackPush(const Object* obj) {
1193 if (static_cast<size_t>(length_) < max_size) {
1194 Push(const_cast<Object*>(obj));
1195 } else {
Ian Rogers1d54e732013-05-02 21:10:01 -07001196 // Internal (thread-local) buffer is full, push to a new buffer instead.
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001197 if (UNLIKELY(output_ == NULL)) {
1198 AllocateOutputChunk();
1199 } else if (UNLIKELY(static_cast<size_t>(output_->length_) == max_size)) {
1200 // Output block is full, queue it up for processing and obtain a new block.
1201 EnqueueOutput();
1202 AllocateOutputChunk();
1203 }
1204 output_->Push(const_cast<Object*>(obj));
1205 }
1206 }
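  // Buffering scheme used above: data_ holds this task's own input, scanned by
  // Run(); output_ collects overflow and, once full, is handed to the thread
  // pool as a brand new MarkStackChunk task via EnqueueOutput().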
1207
1208 void ScanObject(Object* obj) {
1209 mark_sweep_->ScanObjectVisit(obj, MarkObjectParallelVisitor(this));
1210 }
1211
1212 void EnqueueOutput() {
1213 if (output_ != NULL) {
1214 uint64_t start = 0;
1215 if (kMeasureOverhead) {
1216 start = NanoTime();
1217 }
1218 thread_pool_->AddTask(Thread::Current(), output_);
1219 output_ = NULL;
1220 if (kMeasureOverhead) {
Mathieu Chartier4b95e8f2013-07-15 16:32:50 -07001221 mark_sweep_->overhead_time_.fetch_add(NanoTime() - start);
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001222 }
1223 }
1224 }
1225
1226 void AllocateOutputChunk() {
1227 uint64_t start = 0;
1228 if (kMeasureOverhead) {
1229 start = NanoTime();
1230 }
1231 output_ = new MarkStackChunk(thread_pool_, mark_sweep_, NULL, NULL);
1232 if (kMeasureOverhead) {
Mathieu Chartier4b95e8f2013-07-15 16:32:50 -07001233 mark_sweep_->overhead_time_.fetch_add(NanoTime() - start);
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001234 }
1235 }
1236
1237 void Finalize() {
1238 EnqueueOutput();
1239 delete this;
1240 }
1241
1242  // Scans all of the objects in the chunk.
1243 virtual void Run(Thread* self) {
Brian Carlstromd74e41b2013-03-24 23:47:01 -07001244 size_t index;
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001245 while ((index = index_++) < length_) {
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001246 if (kUseMarkStackPrefetch) {
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001247 static const size_t prefetch_look_ahead = 1;
1248 __builtin_prefetch(data_[std::min(index + prefetch_look_ahead, length_ - 1)]);
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001249 }
1250 Object* obj = data_[index];
1251 DCHECK(obj != NULL);
1252 ScanObject(obj);
1253 }
1254 }
1255};
1256
1257void MarkSweep::ProcessMarkStackParallel() {
1258 CHECK(kDisableFinger) << "parallel mark stack processing cannot work when finger is enabled";
1259 Thread* self = Thread::Current();
1260 ThreadPool* thread_pool = GetHeap()->GetThreadPool();
1261 // Split the current mark stack up into work tasks.
1262 const size_t num_threads = thread_pool->GetThreadCount();
1263 const size_t stack_size = mark_stack_->Size();
1264 const size_t chunk_size =
1265 std::min((stack_size + num_threads - 1) / num_threads,
1266 static_cast<size_t>(MarkStackChunk::max_size));
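  // Worked example: with stack_size == 10000, num_threads == 4 and
  // max_size == 1024, chunk_size == min(2500, 1024) == 1024, so the loop below
  // creates ceil(10000 / 1024) == 10 tasks rather than just num_threads.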
1267 size_t index = 0;
1268 for (size_t i = 0; i < num_threads || index < stack_size; ++i) {
1269 Object** begin = &mark_stack_->Begin()[std::min(stack_size, index)];
1270 Object** end = &mark_stack_->Begin()[std::min(stack_size, index + chunk_size)];
1271 index += chunk_size;
1272 thread_pool->AddTask(self, new MarkStackChunk(thread_pool, this, begin, end));
1273 }
1274 thread_pool->StartWorkers(self);
Ian Rogers1d54e732013-05-02 21:10:01 -07001275 thread_pool->Wait(self, true, true);
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001276 mark_stack_->Reset();
Brian Carlstrom7934ac22013-07-26 10:54:15 -07001277 // LOG(INFO) << "Idle wait time " << PrettyDuration(thread_pool->GetWaitTime());
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001278 CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
Carl Shapiro69759ea2011-07-21 18:13:35 -07001279}
1280
Ian Rogers5d76c432011-10-31 21:42:49 -07001281// Scan anything that's on the mark stack.
Carl Shapiro69759ea2011-07-21 18:13:35 -07001282void MarkSweep::ProcessMarkStack() {
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001283 ThreadPool* thread_pool = GetHeap()->GetThreadPool();
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001284 timings_.StartSplit("ProcessMarkStack");
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001285 if (kParallelMarkStack && thread_pool != NULL && thread_pool->GetThreadCount() > 0) {
1286 ProcessMarkStackParallel();
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001287 timings_.EndSplit();
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001288 return;
1289 }
1290
Mathieu Chartierd8195f12012-10-05 12:21:28 -07001291 if (kUseMarkStackPrefetch) {
1292 const size_t fifo_size = 4;
1293 const size_t fifo_mask = fifo_size - 1;
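    // fifo_size is a power of two, so (fifo_pos & fifo_mask) wraps around the
    // buffer without a division; e.g. positions 0,1,2,3,4,5 map to slots
    // 0,1,2,3,0,1.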
1294 const Object* fifo[fifo_size];
Brian Carlstrom02c8cc62013-07-18 15:54:44 -07001295 for (size_t i = 0; i < fifo_size; ++i) {
Mathieu Chartierd8195f12012-10-05 12:21:28 -07001296 fifo[i] = NULL;
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001297 }
Mathieu Chartierd8195f12012-10-05 12:21:28 -07001298 size_t fifo_pos = 0;
1299 size_t fifo_count = 0;
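    // Software pipeline: each pass scans the object prefetched a few iterations
    // earlier while issuing a prefetch for the freshly popped one, hiding cache
    // miss latency behind useful scanning work.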
1300 for (;;) {
1301 const Object* obj = fifo[fifo_pos & fifo_mask];
1302 if (obj != NULL) {
1303 ScanObject(obj);
1304 fifo[fifo_pos & fifo_mask] = NULL;
1305 --fifo_count;
1306 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001307
Mathieu Chartierd8195f12012-10-05 12:21:28 -07001308 if (!mark_stack_->IsEmpty()) {
1309 const Object* obj = mark_stack_->PopBack();
1310 DCHECK(obj != NULL);
1311 fifo[fifo_pos & fifo_mask] = obj;
1312 __builtin_prefetch(obj);
1313 fifo_count++;
1314 }
1315 fifo_pos++;
1316
1317 if (!fifo_count) {
1318 CHECK(mark_stack_->IsEmpty()) << mark_stack_->Size();
1319 break;
1320 }
1321 }
1322 } else {
1323 while (!mark_stack_->IsEmpty()) {
1324 const Object* obj = mark_stack_->PopBack();
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001325 DCHECK(obj != NULL);
Mathieu Chartierd8195f12012-10-05 12:21:28 -07001326 ScanObject(obj);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001327 }
1328 }
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001329 timings_.EndSplit();
Carl Shapiro69759ea2011-07-21 18:13:35 -07001330}
1331
Carl Shapiro69759ea2011-07-21 18:13:35 -07001332// Walks the reference list marking any references subject to the
1333// reference clearing policy. References with a black referent are
1334// removed from the list. References with white referents biased
1335// toward saving are blackened and also removed from the list.
1336void MarkSweep::PreserveSomeSoftReferences(Object** list) {
1337 DCHECK(list != NULL);
1338 Object* clear = NULL;
1339 size_t counter = 0;
Mathieu Chartierb43b7d42012-06-19 13:15:09 -07001340
1341 DCHECK(mark_stack_->IsEmpty());
1342
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001343 timings_.StartSplit("PreserveSomeSoftReferences");
Carl Shapiro69759ea2011-07-21 18:13:35 -07001344 while (*list != NULL) {
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001345 Object* ref = heap_->DequeuePendingReference(list);
1346 Object* referent = heap_->GetReferenceReferent(ref);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001347 if (referent == NULL) {
1348 // Referent was cleared by the user during marking.
1349 continue;
1350 }
1351 bool is_marked = IsMarked(referent);
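    // (++counter) & 1 alternates between 1 and 0, so roughly every other white
    // soft referent encountered (the 1st, 3rd, 5th, ...) is preserved.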
1352 if (!is_marked && ((++counter) & 1)) {
1353 // Referent is white and biased toward saving, mark it.
1354 MarkObject(referent);
1355 is_marked = true;
1356 }
1357 if (!is_marked) {
1358 // Referent is white, queue it for clearing.
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001359 heap_->EnqueuePendingReference(ref, &clear);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001360 }
1361 }
1362 *list = clear;
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001363 timings_.EndSplit();
1364
Carl Shapiro69759ea2011-07-21 18:13:35 -07001365 // Restart the mark with the newly black references added to the
1366 // root set.
1367 ProcessMarkStack();
1368}
1369
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001370inline bool MarkSweep::IsMarked(const Object* object) const
1371 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
Mathieu Chartier9642c962013-08-05 17:40:36 -07001372 if (IsImmune(object)) {
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001373 return true;
1374 }
1375 DCHECK(current_mark_bitmap_ != NULL);
1376 if (current_mark_bitmap_->HasAddress(object)) {
1377 return current_mark_bitmap_->Test(object);
1378 }
1379 return heap_->GetMarkBitmap()->Test(object);
1380}
1381
1382
Carl Shapiro69759ea2011-07-21 18:13:35 -07001383 // Unlink the reference list, clearing references whose referents are
1385 // white. Cleared references registered to a reference queue are
1385// scheduled for appending by the heap worker thread.
1386void MarkSweep::ClearWhiteReferences(Object** list) {
1387 DCHECK(list != NULL);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001388 while (*list != NULL) {
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001389 Object* ref = heap_->DequeuePendingReference(list);
1390 Object* referent = heap_->GetReferenceReferent(ref);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001391 if (referent != NULL && !IsMarked(referent)) {
1392 // Referent is white, clear it.
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001393 heap_->ClearReferenceReferent(ref);
1394 if (heap_->IsEnqueuable(ref)) {
1395 heap_->EnqueueReference(ref, &cleared_reference_list_);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001396 }
1397 }
1398 }
1399 DCHECK(*list == NULL);
1400}
1401
1402// Enqueues finalizer references with white referents. White
1403// referents are blackened, moved to the zombie field, and the
1404// referent field is cleared.
1405void MarkSweep::EnqueueFinalizerReferences(Object** list) {
1406 DCHECK(list != NULL);
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001407 timings_.StartSplit("EnqueueFinalizerReferences");
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001408 MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset();
Carl Shapiro69759ea2011-07-21 18:13:35 -07001409 bool has_enqueued = false;
1410 while (*list != NULL) {
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001411 Object* ref = heap_->DequeuePendingReference(list);
1412 Object* referent = heap_->GetReferenceReferent(ref);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001413 if (referent != NULL && !IsMarked(referent)) {
1414 MarkObject(referent);
1415      // If the referent is non-null the reference must be enqueuable.
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001416 DCHECK(heap_->IsEnqueuable(ref));
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001417 ref->SetFieldObject(zombie_offset, referent, false);
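      // Stashing the referent in the zombie field keeps it reachable for the
      // finalizer even though the referent field itself is cleared just below.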
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001418 heap_->ClearReferenceReferent(ref);
1419 heap_->EnqueueReference(ref, &cleared_reference_list_);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001420 has_enqueued = true;
1421 }
1422 }
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001423 timings_.EndSplit();
Carl Shapiro69759ea2011-07-21 18:13:35 -07001424 if (has_enqueued) {
1425 ProcessMarkStack();
1426 }
1427 DCHECK(*list == NULL);
1428}
1429
Carl Shapiro58551df2011-07-24 03:09:51 -07001430// Process reference class instances and schedule finalizations.
Carl Shapiro69759ea2011-07-21 18:13:35 -07001431void MarkSweep::ProcessReferences(Object** soft_references, bool clear_soft,
1432 Object** weak_references,
1433 Object** finalizer_references,
1434 Object** phantom_references) {
1435 DCHECK(soft_references != NULL);
1436 DCHECK(weak_references != NULL);
1437 DCHECK(finalizer_references != NULL);
1438 DCHECK(phantom_references != NULL);
1439
1440 // Unless we are in the zygote or required to clear soft references
1441  // with white referents, preserve some white referents.
Ian Rogers2945e242012-06-03 14:45:16 -07001442 if (!clear_soft && !Runtime::Current()->IsZygote()) {
Carl Shapiro69759ea2011-07-21 18:13:35 -07001443 PreserveSomeSoftReferences(soft_references);
1444 }
1445
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001446 timings_.StartSplit("ProcessReferences");
Carl Shapiro69759ea2011-07-21 18:13:35 -07001447 // Clear all remaining soft and weak references with white
1448 // referents.
1449 ClearWhiteReferences(soft_references);
1450 ClearWhiteReferences(weak_references);
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001451 timings_.EndSplit();
Carl Shapiro69759ea2011-07-21 18:13:35 -07001452
1453 // Preserve all white objects with finalize methods and schedule
1454 // them for finalization.
1455 EnqueueFinalizerReferences(finalizer_references);
1456
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001457 timings_.StartSplit("ProcessReferences");
Carl Shapiro69759ea2011-07-21 18:13:35 -07001458 // Clear all f-reachable soft and weak references with white
1459 // referents.
1460 ClearWhiteReferences(soft_references);
1461 ClearWhiteReferences(weak_references);
1462
1463 // Clear all phantom references with white referents.
1464 ClearWhiteReferences(phantom_references);
1465
1466 // At this point all reference lists should be empty.
1467 DCHECK(*soft_references == NULL);
1468 DCHECK(*weak_references == NULL);
1469 DCHECK(*finalizer_references == NULL);
1470 DCHECK(*phantom_references == NULL);
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001471 timings_.EndSplit();
Carl Shapiro69759ea2011-07-21 18:13:35 -07001472}
1473
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001474void MarkSweep::UnBindBitmaps() {
Anwar Ghuloum46543222013-08-12 09:28:42 -07001475 base::TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
Mathieu Chartier02e25112013-08-14 16:14:24 -07001476 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001477 if (space->IsDlMallocSpace()) {
1478 space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001479 if (alloc_space->temp_bitmap_.get() != NULL) {
1480 // At this point, the temp_bitmap holds our old mark bitmap.
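        // (The mark bitmap was bound to the live bitmap for this collection; the
        // CHECK below verifies that aliasing before the original is restored.)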
Ian Rogers1d54e732013-05-02 21:10:01 -07001481 accounting::SpaceBitmap* new_bitmap = alloc_space->temp_bitmap_.release();
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001482 GetHeap()->GetMarkBitmap()->ReplaceBitmap(alloc_space->mark_bitmap_.get(), new_bitmap);
1483 CHECK_EQ(alloc_space->mark_bitmap_.release(), alloc_space->live_bitmap_.get());
1484 alloc_space->mark_bitmap_.reset(new_bitmap);
1485 DCHECK(alloc_space->temp_bitmap_.get() == NULL);
1486 }
1487 }
1488 }
1489}
1490
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001491void MarkSweep::FinishPhase() {
Anwar Ghuloum46543222013-08-12 09:28:42 -07001492 base::TimingLogger::ScopedSplit split("FinishPhase", &timings_);
1493 // Can't enqueue references if we hold the mutator lock.
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001494 Object* cleared_references = GetClearedReferences();
Ian Rogers1d54e732013-05-02 21:10:01 -07001495 Heap* heap = GetHeap();
Anwar Ghuloum46543222013-08-12 09:28:42 -07001496 timings_.NewSplit("EnqueueClearedReferences");
Ian Rogers1d54e732013-05-02 21:10:01 -07001497 heap->EnqueueClearedReferences(&cleared_references);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001498
Anwar Ghuloum46543222013-08-12 09:28:42 -07001499 timings_.NewSplit("PostGcVerification");
Ian Rogers1d54e732013-05-02 21:10:01 -07001500 heap->PostGcVerification(this);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001501
Anwar Ghuloum46543222013-08-12 09:28:42 -07001502 timings_.NewSplit("GrowForUtilization");
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07001503 heap->GrowForUtilization(GetGcType(), GetDurationNs());
Mathieu Chartier65db8802012-11-20 12:36:46 -08001504
Anwar Ghuloum46543222013-08-12 09:28:42 -07001505 timings_.NewSplit("RequestHeapTrim");
Ian Rogers1d54e732013-05-02 21:10:01 -07001506 heap->RequestHeapTrim();
Mathieu Chartier65db8802012-11-20 12:36:46 -08001507
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001508 // Update the cumulative statistics
Ian Rogers1d54e732013-05-02 21:10:01 -07001509 total_time_ns_ += GetDurationNs();
1510 total_paused_time_ns_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(), 0,
1511 std::plus<uint64_t>());
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001512 total_freed_objects_ += GetFreedObjects();
1513 total_freed_bytes_ += GetFreedBytes();
1514
1515 // Ensure that the mark stack is empty.
1516 CHECK(mark_stack_->IsEmpty());
1517
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001518 if (kCountScannedTypes) {
1519 VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
1520 << " other=" << other_count_;
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001521 }
1522
1523 if (kCountTasks) {
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001524 VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001525 }
1526
1527 if (kMeasureOverhead) {
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001528 VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001529 }
1530
1531 if (kProfileLargeObjects) {
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001532 VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001533 }
1534
1535 if (kCountClassesMarked) {
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001536 VLOG(gc) << "Classes marked " << classes_marked_;
1537 }
1538
1539 if (kCountJavaLangRefs) {
1540 VLOG(gc) << "References scanned " << reference_count_;
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001541 }
1542
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001543 // Update the cumulative loggers.
1544 cumulative_timings_.Start();
Anwar Ghuloum6f28d912013-07-24 15:02:53 -07001545 cumulative_timings_.AddLogger(timings_);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001546 cumulative_timings_.End();
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001547
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001548 // Clear all of the spaces' mark bitmaps.
Mathieu Chartier02e25112013-08-14 16:14:24 -07001549 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001550 if (space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001551 space->GetMarkBitmap()->Clear();
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001552 }
1553 }
Mathieu Chartier5301cd22012-05-31 12:11:36 -07001554 mark_stack_->Reset();
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001555
1556 // Reset the marked large objects.
Ian Rogers1d54e732013-05-02 21:10:01 -07001557 space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001558 large_objects->GetMarkObjects()->Clear();
Carl Shapiro69759ea2011-07-21 18:13:35 -07001559}
1560
Ian Rogers1d54e732013-05-02 21:10:01 -07001561} // namespace collector
1562} // namespace gc
Carl Shapiro69759ea2011-07-21 18:13:35 -07001563} // namespace art