/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "monitor.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::ArtField;
using ::art::mirror::Class;
using ::art::mirror::Object;
using ::art::mirror::ObjectArray;

namespace art {
namespace gc {
namespace collector {

// Performance options.
constexpr bool kUseRecursiveMark = false;
constexpr bool kUseMarkStackPrefetch = true;
constexpr size_t kSweepArrayChunkFreeSize = 1024;

// Parallelism options.
constexpr bool kParallelCardScan = true;
constexpr bool kParallelRecursiveMark = true;
// Don't attempt to parallelize mark stack processing unless the mark stack is at least n
// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc. Not
// having this can add overhead in ProcessReferences since we may end up doing many calls of
// ProcessMarkStack with very small mark stacks.
constexpr size_t kMinimumParallelMarkStackSize = 128;
constexpr bool kParallelProcessMarkStack = true;

// Profiling and information flags.
constexpr bool kCountClassesMarked = false;
constexpr bool kProfileLargeObjects = false;
constexpr bool kMeasureOverhead = false;
constexpr bool kCountTasks = false;
constexpr bool kCountJavaLangRefs = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
constexpr bool kCheckLocks = kDebugLocking;

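// Illustrative sketch only (ProcessMarkStack itself is not shown in this file): a threshold such
// as kMinimumParallelMarkStackSize is typically consulted before farming work out to the pool:
//   if (kParallelProcessMarkStack && thread_pool != nullptr &&
//       mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
//     // Split the mark stack into MarkStackTasks and let the workers drain them.
//   } else {
//     // Drain the mark stack on the current thread.
//   }
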
void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    BindLiveToMarkBitmap(space);
  }

  // Add the space to the immune region.
  if (immune_begin_ == NULL) {
    DCHECK(immune_end_ == NULL);
    SetImmuneRange(reinterpret_cast<Object*>(space->Begin()),
                   reinterpret_cast<Object*>(space->End()));
  } else {
    const space::ContinuousSpace* prev_space = nullptr;
    // Find out if the previous space is immune.
    for (space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
      if (cur_space == space) {
        break;
      }
      prev_space = cur_space;
    }
    // If previous space was immune, then extend the immune region. Relies on continuous spaces
    // being sorted by Heap::AddContinuousSpace.
    if (prev_space != NULL &&
        immune_begin_ <= reinterpret_cast<Object*>(prev_space->Begin()) &&
        immune_end_ >= reinterpret_cast<Object*>(prev_space->End())) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
    }
  }
}

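// Note on ImmuneSpace above: the immune region is a single contiguous
// [immune_begin_, immune_end_) range, so a space only extends the range when the space
// immediately before it is already immune. For example, with spaces laid out as
// [image][zygote][alloc], immuning the image and then the zygote grows the range; immuning
// non-adjacent spaces leaves the range unchanged.
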
void MarkSweep::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      ImmuneSpace(space);
    }
  }
  timings_.EndSplit();
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       (is_concurrent ? "concurrent mark sweep": "mark sweep")),
      current_mark_bitmap_(NULL),
      java_lang_Class_(NULL),
      mark_stack_(NULL),
      immune_begin_(NULL),
      immune_end_(NULL),
      soft_reference_list_(NULL),
      weak_reference_list_(NULL),
      finalizer_reference_list_(NULL),
      phantom_reference_list_(NULL),
      cleared_reference_list_(NULL),
      gc_barrier_(new Barrier(0)),
      large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
      mark_stack_expand_lock_("mark sweep mark stack expand lock"),
      is_concurrent_(is_concurrent),
      clear_soft_references_(false) {
}

void MarkSweep::InitializePhase() {
  timings_.Reset();
  base::TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = GetHeap()->mark_stack_.get();
  DCHECK(mark_stack_ != NULL);
  SetImmuneRange(NULL, NULL);
  soft_reference_list_ = NULL;
  weak_reference_list_ = NULL;
  finalizer_reference_list_ = NULL;
  phantom_reference_list_ = NULL;
  cleared_reference_list_ = NULL;
  freed_bytes_ = 0;
  freed_objects_ = 0;
  class_count_ = 0;
  array_count_ = 0;
  other_count_ = 0;
  large_object_test_ = 0;
  large_object_mark_ = 0;
  classes_marked_ = 0;
  overhead_time_ = 0;
  work_chunks_created_ = 0;
  work_chunks_deleted_ = 0;
  reference_count_ = 0;
  java_lang_Class_ = Class::GetJavaLangClass();
  CHECK(java_lang_Class_ != NULL);

  FindDefaultMarkBitmap();

  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
}

void MarkSweep::ProcessReferences(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_,
                    &finalizer_reference_list_, &phantom_reference_list_);
}

bool MarkSweep::HandleDirtyObjectsPhase() {
  base::TimingLogger::ScopedSplit split("HandleDirtyObjectsPhase", &timings_);
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Re-mark root set.
    ReMarkRoots();

    // Scan dirty objects; this is only required if we are not doing concurrent GC.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }

  ProcessReferences(self);

  // Only need to do this if we have the card mark verification on, and only during concurrent GC.
  if (GetHeap()->verify_missing_card_marks_) {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // This second sweep makes sure that we don't have any objects in the live stack which point to
    // freed objects. These cause problems since their references may be previously freed objects.
    SweepArray(GetHeap()->allocation_stack_.get(), false);
  }
  return true;
}

bool MarkSweep::IsConcurrent() const {
  return is_concurrent_;
}

void MarkSweep::MarkingPhase() {
  base::TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  Heap* heap = GetHeap();
  Thread* self = Thread::Current();

  BindBitmaps();
  FindDefaultMarkBitmap();

  // Process dirty cards and add dirty cards to mod union tables.
  heap->ProcessCards(timings_);

  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  heap->SwapStacks();

  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    MarkRoots();
  } else {
    MarkThreadRoots(self);
    MarkNonThreadRoots();
  }
  MarkConcurrentRoots();

  heap->UpdateAndMarkModUnion(this, timings_, GetGcType());
  MarkReachableObjects();
}

void MarkSweep::MarkThreadRoots(Thread* self) {
  MarkRootsCheckpoint(self);
}

void MarkSweep::MarkReachableObjects() {
  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
  // knowing that new allocations won't be marked as live.
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStack(heap_->alloc_space_->GetLiveBitmap(),
                        heap_->large_object_space_->GetLiveObjects(), live_stack);
  live_stack->Reset();
  timings_.EndSplit();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  base::TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  auto* self = Thread::Current();

  if (!IsConcurrent()) {
    base::TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
    ProcessReferences(self);
  } else {
    base::TimingLogger::ScopedSplit split("UnMarkAllocStack", &timings_);
    accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // The allocation stack contains things allocated since the start of the GC. These may have
    // been marked during this GC meaning they won't be eligible for reclaiming in the next
    // sticky GC. Remove these objects from the mark bitmaps so that they will be eligible for
    // sticky collection.
    // There is a race here which is safely handled. Another thread such as the hprof could
    // have flushed the alloc stack after we resumed the threads. This is safe however, since
    // resetting the allocation stack zeros it out with madvise. This means that we will either
    // read NULLs or attempt to unmark a newly allocated object which will not be marked in the
    // first place.
    mirror::Object** end = allocation_stack->End();
    for (mirror::Object** it = allocation_stack->Begin(); it != end; ++it) {
      const Object* obj = *it;
      if (obj != NULL) {
        UnMarkObjectNonNull(obj);
      }
    }
  }

  // Before freeing anything, let's verify the heap.
  if (kIsDebugBuild) {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    VerifyImageRoots();
  }
  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Reclaim unmarked objects.
    Sweep(false);

    // Swap the live and mark bitmaps for each space which we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();

    // Unbind the live and mark bitmaps.
    UnBindBitmaps();
  }
}

void MarkSweep::SetImmuneRange(Object* begin, Object* end) {
  immune_begin_ = begin;
  immune_end_ = end;
}

void MarkSweep::FindDefaultMarkBitmap() {
  base::TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_mark_bitmap_ = space->GetMarkBitmap();
      CHECK(current_mark_bitmap_ != NULL);
      return;
    }
  }
  GetHeap()->DumpSpaces();
  LOG(FATAL) << "Could not find a default mark bitmap";
}

void MarkSweep::ExpandMarkStack() {
  // Rare case, no need to have Thread::Current be a parameter.
  MutexLock mu(Thread::Current(), mark_stack_expand_lock_);
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  mark_stack_->Resize(mark_stack_->Capacity() * 2);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

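// Note on ExpandMarkStack above: the capacity re-check under mark_stack_expand_lock_ makes this a
// double-checked expansion. Threads race to ExpandMarkStack when a push fails; the first one in
// doubles the capacity (copying the contents out and pushing them back), and later arrivals
// observe Size() < Capacity() and return immediately.
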
inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj) {
  DCHECK(obj != NULL);
  if (MarkObjectParallel(obj)) {
    while (UNLIKELY(!mark_stack_->AtomicPushBack(const_cast<Object*>(obj)))) {
      // Only reason a push can fail is that the mark stack is full.
      ExpandMarkStack();
    }
  }
}

inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) {
  DCHECK(!IsImmune(obj));
  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, false);
      return;
    }
  }

  DCHECK(object_bitmap->HasAddress(obj));
  object_bitmap->Clear(obj);
}

inline void MarkSweep::MarkObjectNonNull(const Object* obj) {
  DCHECK(obj != NULL);

  if (IsImmune(obj)) {
    DCHECK(IsMarked(obj));
    return;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, true);
      return;
    }
  }

  // This object was not previously marked.
  if (!object_bitmap->Test(obj)) {
    object_bitmap->Set(obj);
    // Do we need to expand the mark stack?
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(const_cast<Object*>(obj));
  }
}

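// Note on MarkObjectNonNull above: because only objects whose mark bit was clear are pushed, each
// object enters the mark stack at most once on this path, and the stack is grown eagerly before
// the push so PushBack itself can never overflow.
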
// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool MarkSweep::MarkLargeObject(const Object* obj, bool set) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
  if (kProfileLargeObjects) {
    ++large_object_test_;
  }
  if (UNLIKELY(!large_objects->Test(obj))) {
    if (!large_object_space->Contains(obj)) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      VerifyRoots();
      LOG(FATAL) << "Can't mark bad root";
    }
    if (kProfileLargeObjects) {
      ++large_object_mark_;
    }
    if (set) {
      large_objects->Set(obj);
    } else {
      large_objects->Clear(obj);
    }
    return true;
  }
  return false;
}

inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != NULL);

  if (IsImmune(obj)) {
    DCHECK(IsMarked(obj));
    return false;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (new_bitmap != NULL) {
      object_bitmap = new_bitmap;
    } else {
      // TODO: Remove the Thread::Current here?
      // TODO: Convert this to some kind of atomic marking?
      MutexLock mu(Thread::Current(), large_object_lock_);
      return MarkLargeObject(obj, true);
    }
  }

  // Return true if the object was not previously marked.
  return !object_bitmap->AtomicTestAndSet(obj);
}

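// Note on MarkObjectParallel above: AtomicTestAndSet returns the previous value of the bit, so
// exactly one racing thread observes false and wins the right to process the object; all other
// threads see it as already marked. A minimal sketch of the caller-side idiom:
//   if (MarkObjectParallel(ref)) {  // true only for the first marker.
//     MarkStackPush(ref);           // Only the winner queues the object for scanning.
//   }
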
// Used to mark objects when recursing. Recursion is done by moving
// the finger across the bitmaps in address order and marking child
// objects. Any newly-marked objects whose addresses are lower than
// the finger won't be visited by the bitmap scan, so those objects
// need to be added to the mark stack.
inline void MarkSweep::MarkObject(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj);
  }
}

void MarkSweep::MarkRoot(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj);
  }
}

void MarkSweep::MarkRootParallelCallback(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNullParallel(root);
}

void MarkSweep::MarkObjectCallback(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNull(root);
}

void MarkSweep::ReMarkObjectVisitor(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNull(root);
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) {
  // See if the root is on any space bitmap.
  if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == NULL) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root;
      if (visitor != NULL) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

// Marks all objects in the root set.
void MarkSweep::MarkRoots() {
  timings_.StartSplit("MarkRoots");
  Runtime::Current()->VisitNonConcurrentRoots(MarkObjectCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkNonThreadRoots() {
  timings_.StartSplit("MarkNonThreadRoots");
  Runtime::Current()->VisitNonThreadRoots(MarkObjectCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkConcurrentRoots() {
  timings_.StartSplit("MarkConcurrentRoots");
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(MarkObjectCallback, this, false, true);
  timings_.EndSplit();
}

void MarkSweep::CheckObject(const Object* obj) {
  DCHECK(obj != NULL);
  VisitObjectReferences(obj, [this](const Object* obj, const Object* ref, MemberOffset offset,
                                    bool is_static) NO_THREAD_SAFETY_ANALYSIS {
    Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
    CheckReference(obj, ref, offset, is_static);
  });
}

void MarkSweep::VerifyImageRootVisitor(Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  DCHECK(mark_sweep->heap_->GetMarkBitmap()->Test(root));
  mark_sweep->CheckObject(root);
}

void MarkSweep::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
  CHECK(space->IsDlMallocSpace());
  space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = alloc_space->mark_bitmap_.release();
  GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
  alloc_space->temp_bitmap_.reset(mark_bitmap);
  alloc_space->mark_bitmap_.reset(live_bitmap);
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
      : mark_sweep_(mark_sweep) {}

  // TODO: Fix me when annotalysis works with visitors.
  void operator()(const Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

template <bool kUseFinger = false>
class MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
                const Object** mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != NULL);
      // TODO: Check performance?
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  static const size_t kMaxSize = 1 * KB;

 protected:
  class ScanObjectParallelVisitor {
   public:
    explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
        : chunk_task_(chunk_task) {}

    void operator()(const Object* obj) const {
      MarkSweep* mark_sweep = chunk_task_->mark_sweep_;
      mark_sweep->ScanObjectVisit(obj,
          [mark_sweep, this](const Object* /* obj */, const Object* ref,
              const MemberOffset& /* offset */, bool /* is_static */) ALWAYS_INLINE {
        if (ref != nullptr && mark_sweep->MarkObjectParallel(ref)) {
          if (kUseFinger) {
            android_memory_barrier();
            if (reinterpret_cast<uintptr_t>(ref) >=
                static_cast<uintptr_t>(mark_sweep->atomic_finger_)) {
              return;
            }
          }
          chunk_task_->MarkStackPush(ref);
        }
      });
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread local mark stack for this task.
  const Object* mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

  void MarkStackPush(const Object* obj) ALWAYS_INLINE {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK(mark_stack_pos_ < kMaxSize);
    mark_stack_[mark_stack_pos_++] = obj;
  }

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) {
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<const Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      const Object* obj = NULL;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          const Object* obj = mark_stack_[--mark_stack_pos_];
          DCHECK(obj != NULL);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_];
      }
      DCHECK(obj != NULL);
      visitor(obj);
    }
  }
};

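// Note on MarkStackTask above: when a task's fixed-size stack fills up, MarkStackPush hands the
// upper (most recently pushed) half of the stack to the pool as a fresh MarkStackTask and keeps
// the lower half, so overflow doubles as the load-balancing mechanism and no task ever blocks
// waiting for stack space.
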
class CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, accounting::SpaceBitmap* bitmap,
               byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
               const Object** mark_stack_obj)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age) {
  }

 protected:
  accounting::SpaceBitmap* const bitmap_;
  byte* const begin_;
  byte* const end_;
  const byte minimum_age_;

  virtual void Finalize() {
    delete this;
  }

  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_);
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  const bool parallel = kParallelCardScan && thread_pool != nullptr;
  if (parallel) {
    auto* self = Thread::Current();
    // Can't have a different split for each space since multiple spaces can have their cards being
    // scanned at the same time.
    timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects");
    // Try to take some of the mark stack since we can pass this off to the worker tasks.
    const Object** mark_stack_begin = const_cast<const Object**>(mark_stack_->Begin());
    const Object** mark_stack_end = const_cast<const Object**>(mark_stack_->End());
    const auto mark_stack_size = mark_stack_end - mark_stack_begin;
    const size_t thread_count = thread_pool->GetThreadCount() + 1;
    // Estimated number of work tasks we will create.
    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
    DCHECK_NE(mark_stack_tasks, 0U);
    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                             mark_stack_size / mark_stack_tasks + 1);
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      byte* card_begin = space->Begin();
      byte* card_end = space->End();
      // Calculate how many bytes of heap we will scan.
      const size_t address_range = card_end - card_begin;
      // Calculate how much address range each task gets.
      const size_t card_delta = RoundUp(address_range / thread_count + 1,
                                        accounting::CardTable::kCardSize);
      // Create the worker tasks for this space.
      while (card_begin != card_end) {
        // Add a range of cards.
        size_t addr_remaining = card_end - card_begin;
        size_t card_increment = std::min(card_delta, addr_remaining);
        // Take from the back of the mark stack.
        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
        mark_stack_end -= mark_stack_increment;
        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
        DCHECK_EQ(mark_stack_end, mark_stack_->End());
        // Add the new task to the thread pool.
        auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
                                      card_begin + card_increment, minimum_age,
                                      mark_stack_increment, mark_stack_end);
        thread_pool->AddTask(self, task);
        card_begin += card_increment;
      }
    }
    thread_pool->StartWorkers(self);
    thread_pool->Wait(self, paused, true);  // Only do work in the main thread if we are paused.
    thread_pool->StopWorkers(self);
    timings_.EndSplit();
  } else {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      // Image spaces are handled properly since live == marked for them.
      switch (space->GetGcRetentionPolicy()) {
        case space::kGcRetentionPolicyNeverCollect:
          timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
              "ScanGrayImageSpaceObjects");
          break;
        case space::kGcRetentionPolicyFullCollect:
          timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
              "ScanGrayZygoteSpaceObjects");
          break;
        case space::kGcRetentionPolicyAlwaysCollect:
          timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
              "ScanGrayAllocSpaceObjects");
          break;
      }
      ScanObjectVisitor visitor(this);
      card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, minimum_age);
      timings_.EndSplit();
    }
  }
}

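// Rough example of the partitioning arithmetic in ScanGrayObjects above: with a 64 MiB space and
// thread_count == 4, card_delta is about 16 MiB rounded up to kCardSize, so each CardScanTask
// scans roughly 16 MiB of cards and also inherits up to mark_stack_delta entries popped off the
// back of the shared mark stack, giving every worker marking work to start with.
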
void MarkSweep::VerifyImageRoots() {
  // Verify roots ensures that all the references inside the image space point to
  // objects which are either in the image space or marked objects in the alloc
  // space.
  timings_.StartSplit("VerifyImageRoots");
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsImageSpace()) {
      space::ImageSpace* image_space = space->AsImageSpace();
      uintptr_t begin = reinterpret_cast<uintptr_t>(image_space->Begin());
      uintptr_t end = reinterpret_cast<uintptr_t>(image_space->End());
      accounting::SpaceBitmap* live_bitmap = image_space->GetLiveBitmap();
      DCHECK(live_bitmap != NULL);
      live_bitmap->VisitMarkedRange(begin, end, [this](const Object* obj) {
        if (kCheckLocks) {
          Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
        }
        DCHECK(obj != NULL);
        CheckObject(obj);
      });
    }
  }
  timings_.EndSplit();
}

class RecursiveMarkTask : public MarkStackTask<false> {
 public:
  RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
                    accounting::SpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
      : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL),
        bitmap_(bitmap),
        begin_(begin),
        end_(end) {
  }

 protected:
  accounting::SpaceBitmap* const bitmap_;
  const uintptr_t begin_;
  const uintptr_t end_;

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    bitmap_->VisitMarkedRange(begin_, end_, visitor);
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  base::TimingLogger::ScopedSplit split("RecursiveMark", &timings_);
  // RecursiveMark will build the lists of known instances of the Reference classes.
  // See DelayReferenceReferent for details.
  CHECK(soft_reference_list_ == NULL);
  CHECK(weak_reference_list_ == NULL);
  CHECK(finalizer_reference_list_ == NULL);
  CHECK(phantom_reference_list_ == NULL);
  CHECK(cleared_reference_list_ == NULL);

  if (kUseRecursiveMark) {
    const bool partial = GetGcType() == kGcTypePartial;
    ScanObjectVisitor scan_visitor(this);
    auto* self = Thread::Current();
    ThreadPool* thread_pool = heap_->GetThreadPool();
    const bool parallel = kParallelRecursiveMark && thread_pool != NULL;
    mark_stack_->Reset();
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_mark_bitmap_ = space->GetMarkBitmap();
        if (current_mark_bitmap_ == NULL) {
          GetHeap()->DumpSpaces();
          LOG(FATAL) << "invalid bitmap";
        }
        if (parallel) {
          // We will use the mark stack in the future.
          // CHECK(mark_stack_->IsEmpty());
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          atomic_finger_ = static_cast<int32_t>(0xFFFFFFFF);

          // Create a few worker tasks.
          size_t n = (thread_pool->GetThreadCount() + 1) * 2;
          while (begin != end) {
            uintptr_t start = begin;
            uintptr_t delta = (end - begin) / n;
            delta = RoundUp(delta, KB);
            if (delta < 16 * KB) delta = end - begin;
            begin += delta;
            auto* task = new RecursiveMarkTask(thread_pool, this, current_mark_bitmap_, start,
                                               begin);
            thread_pool->AddTask(self, task);
          }
          thread_pool->StartWorkers(self);
          thread_pool->Wait(self, false, true);
          thread_pool->StopWorkers(self);
        } else {
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
        }
      }
    }
  }
  ProcessMarkStack(false);
}

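// Note on the parallel path in RecursiveMark above: each chunk takes 1/n of the *remaining* range
// (rounded up to KB), so chunk sizes shrink geometrically, which helps even out load imbalance
// across workers; once the remaining range drops below 16 KB, the final task absorbs the rest.
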
bool MarkSweep::IsMarkedCallback(const Object* object, void* arg) {
  return
      reinterpret_cast<MarkSweep*>(arg)->IsMarked(object) ||
      !reinterpret_cast<MarkSweep*>(arg)->GetHeap()->GetLiveBitmap()->Test(object);
}

void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
  ScanGrayObjects(paused, minimum_age);
  ProcessMarkStack(paused);
}

void MarkSweep::ReMarkRoots() {
  timings_.StartSplit("ReMarkRoots");
  Runtime::Current()->VisitRoots(ReMarkObjectVisitor, this, true, true);
  timings_.EndSplit();
}

void MarkSweep::SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg) {
  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
  MutexLock mu(Thread::Current(), vm->weak_globals_lock);
  for (const Object** entry : vm->weak_globals) {
    if (!is_marked(*entry, arg)) {
      *entry = kClearedJniWeakGlobal;
    }
  }
}

struct ArrayMarkedCheck {
  accounting::ObjectStack* live_stack;
  MarkSweep* mark_sweep;
};

// Either marked or not live.
bool MarkSweep::IsMarkedArrayCallback(const Object* object, void* arg) {
  ArrayMarkedCheck* array_check = reinterpret_cast<ArrayMarkedCheck*>(arg);
  if (array_check->mark_sweep->IsMarked(object)) {
    return true;
  }
  accounting::ObjectStack* live_stack = array_check->live_stack;
  return std::find(live_stack->Begin(), live_stack->End(), object) == live_stack->End();
}

void MarkSweep::SweepSystemWeaksArray(accounting::ObjectStack* allocations) {
  Runtime* runtime = Runtime::Current();
  // The sweepers free an entry when the callback reports it as unmarked, but we only want to
  // free entries that are !IsMarked && IsLive. So the callback computes
  // !(!IsMarked && IsLive), which is equal to (IsMarked || !IsLive).
  // Or for swapped bitmaps, (IsLive || !IsMarked).

  timings_.StartSplit("SweepSystemWeaksArray");
  ArrayMarkedCheck visitor;
  visitor.live_stack = allocations;
  visitor.mark_sweep = this;
  runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedArrayCallback, &visitor);
  runtime->GetMonitorList()->SweepMonitorList(IsMarkedArrayCallback, &visitor);
  SweepJniWeakGlobals(IsMarkedArrayCallback, &visitor);
  timings_.EndSplit();
}

void MarkSweep::SweepSystemWeaks() {
  Runtime* runtime = Runtime::Current();
  // The sweepers free an entry when the callback reports it as unmarked, but we only want to
  // free entries that are !IsMarked && IsLive. So the callback computes
  // !(!IsMarked && IsLive), which is equal to (IsMarked || !IsLive).
  // Or for swapped bitmaps, (IsLive || !IsMarked).
  timings_.StartSplit("SweepSystemWeaks");
  runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedCallback, this);
  runtime->GetMonitorList()->SweepMonitorList(IsMarkedCallback, this);
  SweepJniWeakGlobals(IsMarkedCallback, this);
  timings_.EndSplit();
}

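// In other words, a weak entry is cleared only when its referent is live but unmarked; anything
// marked (or absent from the live bitmap) is reported as "marked" so the sweepers keep it.
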
bool MarkSweep::VerifyIsLiveCallback(const Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return "marked".
  return true;
}

void MarkSweep::VerifyIsLive(const Object* obj) {
  Heap* heap = GetHeap();
  if (!heap->GetLiveBitmap()->Test(obj)) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->GetLiveObjects()->Test(obj)) {
      if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
          heap->allocation_stack_->End()) {
        // Object not found!
        heap->DumpSpaces();
        LOG(FATAL) << "Found dead object " << obj;
      }
    }
  }
}

void MarkSweep::VerifySystemWeaks() {
  Runtime* runtime = Runtime::Current();
  // Verify system weaks, uses a special IsMarked callback which always returns true.
  runtime->GetInternTable()->SweepInternTableWeaks(VerifyIsLiveCallback, this);
  runtime->GetMonitorList()->SweepMonitorList(VerifyIsLiveCallback, this);

  JavaVMExt* vm = runtime->GetJavaVM();
  MutexLock mu(Thread::Current(), vm->weak_globals_lock);
  for (const Object** entry : vm->weak_globals) {
    VerifyIsLive(*entry);
  }
}

struct SweepCallbackContext {
  MarkSweep* mark_sweep;
  space::AllocSpace* space;
  Thread* self;
};

class CheckpointMarkThreadRoots : public Closure {
 public:
  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* mark_sweep_;
};

void MarkSweep::MarkRootsCheckpoint(Thread* self) {
  CheckpointMarkThreadRoots check_point(this);
  timings_.StartSplit("MarkRootsCheckpoint");
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the checkpoint be run on all threads, returning a count of the threads that
  // must run through the barrier, including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // TODO: optimize to not release locks when there are no threads to wait for.
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun);
  CHECK_EQ(old_state, kWaitingPerformingGc);
  gc_barrier_->Increment(self, barrier_count);
  self->SetState(kWaitingPerformingGc);
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
  timings_.EndSplit();
}

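// The checkpoint protocol in brief: each mutator thread runs CheckpointMarkThreadRoots at a safe
// point, marking its own roots and then calling Pass() on gc_barrier_, while this thread releases
// the heap bitmap and mutator locks (so suspended threads can make progress) and waits in
// Increment() until barrier_count threads have passed.
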
Ian Rogers30fab402012-01-23 15:43:46 -08001082void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001083 SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001084 MarkSweep* mark_sweep = context->mark_sweep;
1085 Heap* heap = mark_sweep->GetHeap();
Ian Rogers1d54e732013-05-02 21:10:01 -07001086 space::AllocSpace* space = context->space;
Ian Rogers50b35e22012-10-04 10:09:15 -07001087 Thread* self = context->self;
1088 Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
Ian Rogers5d76c432011-10-31 21:42:49 -07001089 // Use a bulk free, that merges consecutive objects before freeing or free per object?
1090 // Documentation suggests better free performance with merging, but this may be at the expensive
1091 // of allocation.
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001092 size_t freed_objects = num_ptrs;
1093 // AllocSpace::FreeList clears the value in ptrs, so perform after clearing the live bit
1094 size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001095 heap->RecordFree(freed_objects, freed_bytes);
Mathieu Chartier4b95e8f2013-07-15 16:32:50 -07001096 mark_sweep->freed_objects_.fetch_add(freed_objects);
1097 mark_sweep->freed_bytes_.fetch_add(freed_bytes);
Carl Shapiro58551df2011-07-24 03:09:51 -07001098}
1099
void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
  Heap* heap = context->mark_sweep->GetHeap();
  // We don't free any actual memory to avoid dirtying the shared zygote pages.
  for (size_t i = 0; i < num_ptrs; ++i) {
    Object* obj = static_cast<Object*>(ptrs[i]);
    heap->GetLiveBitmap()->Clear(obj);
    heap->GetCardTable()->MarkCard(obj);
  }
}

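// Sweeps only the objects on the given allocation stack instead of walking the full live bitmap;
// this is the cheap sweep path for collections where only objects allocated since the stack was
// last reset can be unreachable.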
void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  size_t freed_bytes = 0;
  space::DlMallocSpace* space = heap_->GetAllocSpace();

  // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark
  // bitmap, resulting in occasional frees of Weaks which are still in use.
  SweepSystemWeaksArray(allocations);

  timings_.StartSplit("SweepArray");
  // Newly allocated objects MUST be in the alloc space and those are the only objects which we are
  // going to free.
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(live_bitmap, mark_bitmap);
    std::swap(large_live_objects, large_mark_objects);
  }

  size_t freed_objects = 0;
  size_t freed_large_objects = 0;
  size_t count = allocations->Size();
  Object** objects = const_cast<Object**>(allocations->Begin());
  Object** out = objects;
  Object** objects_to_chunk_free = out;

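  // Unmarked alloc-space objects are compacted toward the front of the stack and released with
  // bulk FreeList() calls of up to kSweepArrayChunkFreeSize objects at a time, batching allocator
  // work instead of freeing one object per call.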
  // Empty the allocation stack.
  Thread* self = Thread::Current();
  for (size_t i = 0; i < count; ++i) {
    Object* obj = objects[i];
    // There should only be objects in the AllocSpace/LargeObjectSpace in the allocation stack.
    if (LIKELY(mark_bitmap->HasAddress(obj))) {
      if (!mark_bitmap->Test(obj)) {
        // Don't bother un-marking since we clear the mark bitmap anyway.
        *(out++) = obj;
        // Free objects in chunks.
        DCHECK_GE(out, objects_to_chunk_free);
        DCHECK_LE(static_cast<size_t>(out - objects_to_chunk_free), kSweepArrayChunkFreeSize);
        if (static_cast<size_t>(out - objects_to_chunk_free) == kSweepArrayChunkFreeSize) {
          // timings_.StartSplit("FreeList");
          size_t chunk_freed_objects = out - objects_to_chunk_free;
          freed_objects += chunk_freed_objects;
          freed_bytes += space->FreeList(self, chunk_freed_objects, objects_to_chunk_free);
          objects_to_chunk_free = out;
          // timings_.EndSplit();
        }
      }
    } else if (!large_mark_objects->Test(obj)) {
      ++freed_large_objects;
      freed_bytes += large_object_space->Free(self, obj);
    }
  }
  // Free the remaining objects in chunks.
  DCHECK_GE(out, objects_to_chunk_free);
  DCHECK_LE(static_cast<size_t>(out - objects_to_chunk_free), kSweepArrayChunkFreeSize);
  if (out - objects_to_chunk_free > 0) {
    // timings_.StartSplit("FreeList");
    size_t chunk_freed_objects = out - objects_to_chunk_free;
    freed_objects += chunk_freed_objects;
    freed_bytes += space->FreeList(self, chunk_freed_objects, objects_to_chunk_free);
    // timings_.EndSplit();
  }
  CHECK_EQ(count, allocations->Size());
  timings_.EndSplit();

  timings_.StartSplit("RecordFree");
  VLOG(heap) << "Freed " << freed_objects << "/" << count
             << " objects with size " << PrettySize(freed_bytes);
  heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes);
  freed_objects_.fetch_add(freed_objects);
  freed_bytes_.fetch_add(freed_bytes);
  timings_.EndSplit();

  timings_.StartSplit("ResetStack");
  allocations->Reset();
  timings_.EndSplit();
}

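// Bitmap-based sweep used by full and partial collections: for every sweepable continuous space,
// free the objects that are in the live bitmap but not the mark bitmap, then sweep large objects.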
void MarkSweep::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  base::TimingLogger::ScopedSplit split("Sweep", &timings_);

  // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark
  // bitmap, resulting in occasional frees of Weaks which are still in use.
  SweepSystemWeaks();

  const bool partial = (GetGcType() == kGcTypePartial);
  SweepCallbackContext scc;
  scc.mark_sweep = this;
  scc.self = Thread::Current();
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    // We always sweep always-collect spaces.
    bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect);
    if (!partial && !sweep_space) {
      // We also sweep full-collect spaces when the GC isn't partial (i.e. it's a full GC).
      sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
    }
    if (sweep_space) {
      auto begin = reinterpret_cast<uintptr_t>(space->Begin());
      auto end = reinterpret_cast<uintptr_t>(space->End());
      scc.space = space->AsDlMallocSpace();
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
      if (swap_bitmaps) {
        std::swap(live_bitmap, mark_bitmap);
      }
      if (!space->IsZygoteSpace()) {
        base::TimingLogger::ScopedSplit split("SweepAllocSpace", &timings_);
        // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                                           &SweepCallback, reinterpret_cast<void*>(&scc));
      } else {
        base::TimingLogger::ScopedSplit split("SweepZygote", &timings_);
        // Zygote sweep takes care of dirtying cards and clearing live bits, does not free actual
        // memory.
        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                                           &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
      }
    }
  }

  SweepLargeObjects(swap_bitmaps);
}

void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  base::TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  // Sweep large objects.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  accounting::SpaceSetMap::Objects& live_objects = large_live_objects->GetObjects();
  // O(n*log(n)) but hopefully there are not too many large objects.
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  Thread* self = Thread::Current();
  for (const Object* obj : live_objects) {
    if (!large_mark_objects->Test(obj)) {
      freed_bytes += large_object_space->Free(self, const_cast<Object*>(obj));
      ++freed_objects;
    }
  }
  freed_objects_.fetch_add(freed_objects);
  freed_bytes_.fetch_add(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

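// Debugging helper: checks that a reference into the alloc space is either marked or covered by a
// dirty card, and logs the offending field and holder object when the invariant does not hold.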
void MarkSweep::CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) {
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsDlMallocSpace() && space->Contains(ref)) {
      DCHECK(IsMarked(obj));

      bool is_marked = IsMarked(ref);
      if (!is_marked) {
        LOG(INFO) << *space;
        LOG(WARNING) << (is_static ? "Static ref'" : "Instance ref'") << PrettyTypeOf(ref)
                     << "' (" << reinterpret_cast<const void*>(ref) << ") in '" << PrettyTypeOf(obj)
                     << "' (" << reinterpret_cast<const void*>(obj) << ") at offset "
                     << reinterpret_cast<void*>(offset.Int32Value()) << " wasn't marked";

        const Class* klass = is_static ? obj->AsClass() : obj->GetClass();
        DCHECK(klass != NULL);
        const ObjectArray<ArtField>* fields = is_static ? klass->GetSFields() : klass->GetIFields();
        DCHECK(fields != NULL);
        bool found = false;
        for (int32_t i = 0; i < fields->GetLength(); ++i) {
          const ArtField* cur = fields->Get(i);
          if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
            LOG(WARNING) << "Field referencing the alloc space was " << PrettyField(cur);
            found = true;
            break;
          }
        }
        if (!found) {
          LOG(WARNING) << "Could not find field in object alloc space with offset " << offset.Int32Value();
        }

        bool obj_marked = heap_->GetCardTable()->IsDirty(obj);
        if (!obj_marked) {
          LOG(WARNING) << "Object '" << PrettyTypeOf(obj) << "' "
                       << "(" << reinterpret_cast<const void*>(obj) << ") contains references to "
                       << "the alloc space, but wasn't card marked";
        }
      }
    }
    break;
  }
}

// Process the "referent" field in a java.lang.ref.Reference. If the
// referent has not yet been marked, put it on the appropriate list in
// the heap for later processing.
void MarkSweep::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
  DCHECK(klass != nullptr);
  DCHECK(klass->IsReferenceClass());
  DCHECK(obj != NULL);
  Object* referent = heap_->GetReferenceReferent(obj);
  if (referent != NULL && !IsMarked(referent)) {
    if (kCountJavaLangRefs) {
      ++reference_count_;
    }
    Thread* self = Thread::Current();
    // TODO: Remove these locks, and use atomic stacks for storing references?
    if (klass->IsSoftReferenceClass()) {
      MutexLock mu(self, *heap_->GetSoftRefQueueLock());
      heap_->EnqueuePendingReference(obj, &soft_reference_list_);
    } else if (klass->IsWeakReferenceClass()) {
      MutexLock mu(self, *heap_->GetWeakRefQueueLock());
      heap_->EnqueuePendingReference(obj, &weak_reference_list_);
    } else if (klass->IsFinalizerReferenceClass()) {
      MutexLock mu(self, *heap_->GetFinalizerRefQueueLock());
      heap_->EnqueuePendingReference(obj, &finalizer_reference_list_);
    } else if (klass->IsPhantomReferenceClass()) {
      MutexLock mu(self, *heap_->GetPhantomRefQueueLock());
      heap_->EnqueuePendingReference(obj, &phantom_reference_list_);
    } else {
      LOG(FATAL) << "Invalid reference type " << PrettyClass(klass)
                 << " " << std::hex << klass->GetAccessFlags();
    }
  }
}

void MarkSweep::ScanRoot(const Object* obj) {
  ScanObject(obj);
}

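// Visitor handed to ScanObjectVisit: marks each reference field of the object being scanned.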
class MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {}

  // TODO: Fix me when annotalysis works with visitors.
  void operator()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */,
                  bool /* is_static */) const ALWAYS_INLINE
      NO_THREAD_SAFETY_ANALYSIS {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(ref);
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans an object reference. Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(const Object* obj) {
  MarkObjectVisitor visitor(this);
  ScanObjectVisit(obj, visitor);
}

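// Drains the mark stack in parallel: splits it into chunks of at most MarkStackTask<false>::kMaxSize
// objects, hands each chunk to the heap's thread pool, and waits for the workers to finish.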
void MarkSweep::ProcessMarkStackParallel(bool paused) {
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  const size_t num_threads = thread_pool->GetThreadCount();
  const size_t chunk_size =
      std::min(mark_stack_->Size() / num_threads + 1,
               static_cast<size_t>(MarkStackTask<false>::kMaxSize));
  CHECK_GT(chunk_size, 0U);
  // Split the current mark stack up into work tasks.
  for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) {
    const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
    thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta,
                                                        const_cast<const mirror::Object**>(it)));
    it += delta;
  }
  thread_pool->StartWorkers(self);
  // Don't do work in the main thread since it is assumed at least one other thread will require
  // CPU time during the GC.
  thread_pool->Wait(self, paused, true);
  thread_pool->StopWorkers(self);
  mark_stack_->Reset();
  CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
}

// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack(bool paused) {
  timings_.StartSplit("ProcessMarkStack");
  const bool parallel = kParallelProcessMarkStack && GetHeap()->GetThreadPool() &&
      mark_stack_->Size() >= kMinimumParallelMarkStackSize;
  if (parallel) {
    ProcessMarkStackParallel(paused);
  } else {
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<const Object*, kFifoSize> prefetch_fifo;
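    // Pop objects into a small FIFO and issue a prefetch for each, so an object's memory is
    // likely to be cache-resident by the time ScanObject dereferences it.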
    for (;;) {
      const Object* obj = NULL;
      if (kUseMarkStackPrefetch) {
        while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
          const Object* obj = mark_stack_->PopBack();
          DCHECK(obj != NULL);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (prefetch_fifo.empty()) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (mark_stack_->IsEmpty()) {
          break;
        }
        obj = mark_stack_->PopBack();
      }
      DCHECK(obj != NULL);
      ScanObject(obj);
    }
  }
  timings_.EndSplit();
}

// Walks the reference list marking any references subject to the
// reference clearing policy. References with a black referent are
// removed from the list. References with white referents biased
// toward saving are blackened and also removed from the list.
void MarkSweep::PreserveSomeSoftReferences(Object** list) {
  DCHECK(list != NULL);
  Object* clear = NULL;
  size_t counter = 0;

  DCHECK(mark_stack_->IsEmpty());

  timings_.StartSplit("PreserveSomeSoftReferences");
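  // The low bit of counter alternates per white referent, so roughly every other white soft
  // referent is preserved (marked) below and the rest are queued for clearing.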
  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent == NULL) {
      // Referent was cleared by the user during marking.
      continue;
    }
    bool is_marked = IsMarked(referent);
    if (!is_marked && ((++counter) & 1)) {
      // Referent is white and biased toward saving, mark it.
      MarkObject(referent);
      is_marked = true;
    }
    if (!is_marked) {
      // Referent is white, queue it for clearing.
      heap_->EnqueuePendingReference(ref, &clear);
    }
  }
  *list = clear;
  timings_.EndSplit();

  // Restart the mark with the newly black references added to the root set.
  ProcessMarkStack(true);
}

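// An object is considered marked if it lies in an immune region or is set in the appropriate
// mark bitmap: the current bitmap if it covers the object's address, else the heap's mark bitmaps.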
inline bool MarkSweep::IsMarked(const Object* object) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  if (IsImmune(object)) {
    return true;
  }
  DCHECK(current_mark_bitmap_ != NULL);
  if (current_mark_bitmap_->HasAddress(object)) {
    return current_mark_bitmap_->Test(object);
  }
  return heap_->GetMarkBitmap()->Test(object);
}

// Unlink the reference list, clearing reference objects with white
// referents. Cleared references registered to a reference queue are
// scheduled for appending by the heap worker thread.
void MarkSweep::ClearWhiteReferences(Object** list) {
  DCHECK(list != NULL);
  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent != NULL && !IsMarked(referent)) {
      // Referent is white, clear it.
      heap_->ClearReferenceReferent(ref);
      if (heap_->IsEnqueuable(ref)) {
        heap_->EnqueueReference(ref, &cleared_reference_list_);
      }
    }
  }
  DCHECK(*list == NULL);
}

// Enqueues finalizer references with white referents. White
// referents are blackened, moved to the zombie field, and the
// referent field is cleared.
void MarkSweep::EnqueueFinalizerReferences(Object** list) {
  DCHECK(list != NULL);
  timings_.StartSplit("EnqueueFinalizerReferences");
  MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset();
  bool has_enqueued = false;
  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent != NULL && !IsMarked(referent)) {
      MarkObject(referent);
      // If the referent is non-null the reference must be enqueuable.
      DCHECK(heap_->IsEnqueuable(ref));
      ref->SetFieldObject(zombie_offset, referent, false);
      heap_->ClearReferenceReferent(ref);
      heap_->EnqueueReference(ref, &cleared_reference_list_);
      has_enqueued = true;
    }
  }
  timings_.EndSplit();
  if (has_enqueued) {
    ProcessMarkStack(true);
  }
  DCHECK(*list == NULL);
}

// Process reference class instances and schedule finalizations.
void MarkSweep::ProcessReferences(Object** soft_references, bool clear_soft,
                                  Object** weak_references,
                                  Object** finalizer_references,
                                  Object** phantom_references) {
  DCHECK(soft_references != NULL);
  DCHECK(weak_references != NULL);
  DCHECK(finalizer_references != NULL);
  DCHECK(phantom_references != NULL);

  // Unless we are in the zygote or required to clear soft references
  // with white referents, preserve some white referents.
  if (!clear_soft && !Runtime::Current()->IsZygote()) {
    PreserveSomeSoftReferences(soft_references);
  }

  timings_.StartSplit("ProcessReferences");
  // Clear all remaining soft and weak references with white
  // referents.
  ClearWhiteReferences(soft_references);
  ClearWhiteReferences(weak_references);
  timings_.EndSplit();

  // Preserve all white objects with finalize methods and schedule
  // them for finalization.
  EnqueueFinalizerReferences(finalizer_references);

  timings_.StartSplit("ProcessReferences");
  // Clear all f-reachable soft and weak references with white
  // referents.
  ClearWhiteReferences(soft_references);
  ClearWhiteReferences(weak_references);

  // Clear all phantom references with white referents.
  ClearWhiteReferences(phantom_references);

  // At this point all reference lists should be empty.
  DCHECK(*soft_references == NULL);
  DCHECK(*weak_references == NULL);
  DCHECK(*finalizer_references == NULL);
  DCHECK(*phantom_references == NULL);
  timings_.EndSplit();
}

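// Undoes temporary bitmap binding: if an alloc space's real mark bitmap was stashed in
// temp_bitmap_ (leaving mark_bitmap_ aliased to the live bitmap), reinstall the real one.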
void MarkSweep::UnBindBitmaps() {
  base::TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsDlMallocSpace()) {
      space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
      if (alloc_space->temp_bitmap_.get() != NULL) {
        // At this point, the temp_bitmap holds our old mark bitmap.
        accounting::SpaceBitmap* new_bitmap = alloc_space->temp_bitmap_.release();
        GetHeap()->GetMarkBitmap()->ReplaceBitmap(alloc_space->mark_bitmap_.get(), new_bitmap);
        CHECK_EQ(alloc_space->mark_bitmap_.release(), alloc_space->live_bitmap_.get());
        alloc_space->mark_bitmap_.reset(new_bitmap);
        DCHECK(alloc_space->temp_bitmap_.get() == NULL);
      }
    }
  }
}

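// Post-GC bookkeeping: enqueue cleared references, verify the heap, resize it toward the target
// utilization, fold this GC's timings and totals into the cumulative statistics, and clear the
// mark bitmaps and mark stack for the next collection.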
void MarkSweep::FinishPhase() {
  base::TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  // Can't enqueue references if we hold the mutator lock.
  Object* cleared_references = GetClearedReferences();
  Heap* heap = GetHeap();
  timings_.NewSplit("EnqueueClearedReferences");
  heap->EnqueueClearedReferences(&cleared_references);

  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);

  timings_.NewSplit("GrowForUtilization");
  heap->GrowForUtilization(GetGcType(), GetDurationNs());

  timings_.NewSplit("RequestHeapTrim");
  heap->RequestHeapTrim();

  // Update the cumulative statistics.
  total_time_ns_ += GetDurationNs();
  total_paused_time_ns_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(), 0,
                                           std::plus<uint64_t>());
  total_freed_objects_ += GetFreedObjects();
  total_freed_bytes_ += GetFreedBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  if (kCountScannedTypes) {
    VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
             << " other=" << other_count_;
  }

  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
  }

  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
  }

  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
  }

  if (kCountClassesMarked) {
    VLOG(gc) << "Classes marked " << classes_marked_;
  }

  if (kCountJavaLangRefs) {
    VLOG(gc) << "References scanned " << reference_count_;
  }

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      space->GetMarkBitmap()->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();
}

}  // namespace collector
}  // namespace gc
}  // namespace art