/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "monitor.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::ArtField;
using ::art::mirror::Class;
using ::art::mirror::Object;
using ::art::mirror::ObjectArray;

namespace art {
namespace gc {
namespace collector {

// Performance options.
constexpr bool kUseRecursiveMark = false;
constexpr bool kUseMarkStackPrefetch = true;
constexpr size_t kSweepArrayChunkFreeSize = 1024;

// Parallelism options.
constexpr bool kParallelCardScan = true;
constexpr bool kParallelRecursiveMark = true;
// Don't attempt to parallelize mark stack processing unless the mark stack is at least n
// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc. Not
// having this can add overhead in ProcessReferences since we may end up doing many calls to
// ProcessMarkStack with very small mark stacks.
constexpr size_t kMinimumParallelMarkStackSize = 128;
constexpr bool kParallelProcessMarkStack = true;

// Profiling and information flags.
constexpr bool kCountClassesMarked = false;
constexpr bool kProfileLargeObjects = false;
constexpr bool kMeasureOverhead = false;
constexpr bool kCountTasks = false;
constexpr bool kCountJavaLangRefs = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
constexpr bool kCheckLocks = kDebugLocking;

void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    BindLiveToMarkBitmap(space);
  }

  // Add the space to the immune region.
  if (immune_begin_ == NULL) {
    DCHECK(immune_end_ == NULL);
    SetImmuneRange(reinterpret_cast<Object*>(space->Begin()),
                   reinterpret_cast<Object*>(space->End()));
  } else {
    const space::ContinuousSpace* prev_space = nullptr;
    // Find out if the previous space is immune.
    for (space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
      if (cur_space == space) {
        break;
      }
      prev_space = cur_space;
    }
    // If previous space was immune, then extend the immune region. Relies on continuous spaces
    // being sorted by Heap::AddContinuousSpace.
    if (prev_space != NULL &&
        immune_begin_ <= reinterpret_cast<Object*>(prev_space->Begin()) &&
        immune_end_ >= reinterpret_cast<Object*>(prev_space->End())) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
    }
  }
}
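
// A minimal sketch of how the immune region grows (hypothetical addresses, for
// illustration only): with an image space at [0x1000, 0x2000) and a zygote
// space immediately after it at [0x2000, 0x3000), two ImmuneSpace() calls
// produce one contiguous region rather than two disjoint ones:
//
//   ImmuneSpace(image);   // immune_begin_ = 0x1000, immune_end_ = 0x2000
//   ImmuneSpace(zygote);  // prev_space (image) is immune, so the range is
//                         // extended: immune_begin_ = 0x1000, immune_end_ = 0x3000
//
// This is why the code relies on Heap::AddContinuousSpace keeping the spaces
// sorted by address.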

void MarkSweep::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      ImmuneSpace(space);
    }
  }
  timings_.EndSplit();
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       (is_concurrent ? "concurrent mark sweep" : "mark sweep")),
      current_mark_bitmap_(NULL),
      java_lang_Class_(NULL),
      mark_stack_(NULL),
      immune_begin_(NULL),
      immune_end_(NULL),
      soft_reference_list_(NULL),
      weak_reference_list_(NULL),
      finalizer_reference_list_(NULL),
      phantom_reference_list_(NULL),
      cleared_reference_list_(NULL),
      gc_barrier_(new Barrier(0)),
      large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
      is_concurrent_(is_concurrent),
      clear_soft_references_(false) {
}

void MarkSweep::InitializePhase() {
  timings_.Reset();
  base::TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  SetImmuneRange(nullptr, nullptr);
  soft_reference_list_ = nullptr;
  weak_reference_list_ = nullptr;
  finalizer_reference_list_ = nullptr;
  phantom_reference_list_ = nullptr;
  cleared_reference_list_ = nullptr;
  freed_bytes_ = 0;
  freed_large_object_bytes_ = 0;
  freed_objects_ = 0;
  freed_large_objects_ = 0;
  class_count_ = 0;
  array_count_ = 0;
  other_count_ = 0;
  large_object_test_ = 0;
  large_object_mark_ = 0;
  classes_marked_ = 0;
  overhead_time_ = 0;
  work_chunks_created_ = 0;
  work_chunks_deleted_ = 0;
  reference_count_ = 0;
  java_lang_Class_ = Class::GetJavaLangClass();
  CHECK(java_lang_Class_ != nullptr);

  FindDefaultMarkBitmap();

  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
}

void MarkSweep::ProcessReferences(Thread* self) {
  base::TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_,
                    &finalizer_reference_list_, &phantom_reference_list_);
}

bool MarkSweep::HandleDirtyObjectsPhase() {
  base::TimingLogger::ScopedSplit split("HandleDirtyObjectsPhase", &timings_);
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Re-mark root set.
    ReMarkRoots();

    // Scan dirty objects, this is only required if we are not doing concurrent GC.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }

  ProcessReferences(self);
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }

  // Only need to do this if we have the card mark verification on, and only during concurrent GC.
  if (GetHeap()->verify_missing_card_marks_ || GetHeap()->verify_pre_gc_heap_ ||
      GetHeap()->verify_post_gc_heap_) {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // This second sweep makes sure that we don't have any objects in the live stack which point to
    // freed objects. These cause problems since their references may be previously freed objects.
    SweepArray(GetHeap()->allocation_stack_.get(), false);
  }

  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  // Ensure that nobody inserted items in the live stack after we swapped the stacks.
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
  return true;
}

bool MarkSweep::IsConcurrent() const {
  return is_concurrent_;
}

void MarkSweep::MarkingPhase() {
  base::TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  Thread* self = Thread::Current();

  BindBitmaps();
  FindDefaultMarkBitmap();

  // Process dirty cards and add dirty cards to mod union tables.
  heap_->ProcessCards(timings_);

  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  heap_->SwapStacks();

  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    MarkRoots();
  } else {
    MarkThreadRoots(self);
    // At this point the live stack should no longer have any mutators which push into it.
    MarkNonThreadRoots();
  }
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
  MarkConcurrentRoots();

  heap_->UpdateAndMarkModUnion(this, timings_, GetGcType());
  MarkReachableObjects();
}

void MarkSweep::MarkThreadRoots(Thread* self) {
  MarkRootsCheckpoint(self);
}

void MarkSweep::MarkReachableObjects() {
  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
  // knowing that new allocations won't be marked as live.
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStack(heap_->alloc_space_->GetLiveBitmap(),
                        heap_->large_object_space_->GetLiveObjects(), live_stack);
  live_stack->Reset();
  timings_.EndSplit();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  base::TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  Thread* self = Thread::Current();

  if (!IsConcurrent()) {
    ProcessReferences(self);
    {
      ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
      SweepSystemWeaks();
    }
    timings_.StartSplit("PreSweepingGcVerification");
    heap_->PreSweepingGcVerification(this);
    timings_.EndSplit();
  } else {
    base::TimingLogger::ScopedSplit split("UnMarkAllocStack", &timings_);
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
    // The allocation stack contains things allocated since the start of the GC. These may have been
    // marked during this GC meaning they won't be eligible for reclaiming in the next sticky GC.
    // Remove these objects from the mark bitmaps so that they will be eligible for sticky
    // collection.
    // There is a race here which is safely handled. Another thread such as the hprof could
    // have flushed the alloc stack after we resumed the threads. This is safe however, since
    // resetting the allocation stack zeros it out with madvise. This means that we will either
    // read NULLs or attempt to unmark a newly allocated object which will not be marked in the
    // first place.
    mirror::Object** end = allocation_stack->End();
    for (mirror::Object** it = allocation_stack->Begin(); it != end; ++it) {
      const Object* obj = *it;
      if (obj != NULL) {
        UnMarkObjectNonNull(obj);
      }
    }
  }

  // Before freeing anything, let's verify the heap.
  if (kIsDebugBuild) {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    VerifyImageRoots();
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Reclaim unmarked objects.
    Sweep(false);

    // Swap the live and mark bitmaps for each space which we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();

    // Unbind the live and mark bitmaps.
    UnBindBitmaps();
  }
}

void MarkSweep::SetImmuneRange(Object* begin, Object* end) {
  immune_begin_ = begin;
  immune_end_ = end;
}

void MarkSweep::FindDefaultMarkBitmap() {
  base::TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_mark_bitmap_ = space->GetMarkBitmap();
      CHECK(current_mark_bitmap_ != NULL);
      return;
    }
  }
  GetHeap()->DumpSpaces();
  LOG(FATAL) << "Could not find a default mark bitmap";
}

void MarkSweep::ExpandMarkStack() {
  ResizeMarkStack(mark_stack_->Capacity() * 2);
}

void MarkSweep::ResizeMarkStack(size_t new_size) {
  // Rare case, no need to have Thread::Current be a parameter.
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}
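
// Sketch of the double-checked expansion protocol used by callers (illustrative,
// not new runtime behavior): the capacity check is re-run under mark_stack_lock_
// so only one thread pays for the resize.
//
//   if (mark_stack_->Size() >= mark_stack_->Capacity()) {  // unsynchronized hint
//     MutexLock mu(Thread::Current(), mark_stack_lock_);   // serialize expanders
//     ExpandMarkStack();  // re-checks Size() < Capacity() and bails out early if
//   }                     // a racing thread already grew the stack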

inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj) {
  DCHECK(obj != NULL);
  if (MarkObjectParallel(obj)) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(const_cast<Object*>(obj));
  }
}

inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) {
  DCHECK(!IsImmune(obj));
  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, false);
      return;
    }
  }

  DCHECK(object_bitmap->HasAddress(obj));
  object_bitmap->Clear(obj);
}

inline void MarkSweep::MarkObjectNonNull(const Object* obj) {
  DCHECK(obj != NULL);

  if (IsImmune(obj)) {
    DCHECK(IsMarked(obj));
    return;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, true);
      return;
    }
  }

  // This object was not previously marked.
  if (!object_bitmap->Test(obj)) {
    object_bitmap->Set(obj);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      // Lock is not needed but is here anyways to please annotalysis.
      MutexLock mu(Thread::Current(), mark_stack_lock_);
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(const_cast<Object*>(obj));
  }
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool MarkSweep::MarkLargeObject(const Object* obj, bool set) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
  if (kProfileLargeObjects) {
    ++large_object_test_;
  }
  if (UNLIKELY(!large_objects->Test(obj))) {
    if (!large_object_space->Contains(obj)) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      VerifyRoots();
      LOG(FATAL) << "Can't mark bad root";
    }
    if (kProfileLargeObjects) {
      ++large_object_mark_;
    }
    if (set) {
      large_objects->Set(obj);
    } else {
      large_objects->Clear(obj);
    }
    return true;
  }
  return false;
}

inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != NULL);

  if (IsImmune(obj)) {
    DCHECK(IsMarked(obj));
    return false;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (new_bitmap != NULL) {
      object_bitmap = new_bitmap;
    } else {
      // TODO: Remove the Thread::Current here?
      // TODO: Convert this to some kind of atomic marking?
      MutexLock mu(Thread::Current(), large_object_lock_);
      return MarkLargeObject(obj, true);
    }
  }

  // Return true if the object was not previously marked.
  return !object_bitmap->AtomicTestAndSet(obj);
}
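
// Why AtomicTestAndSet is enough for lock-free marking (a sketch, not a change
// in behavior): when several workers race on the same unmarked object, exactly
// one of them observes the bit transition 0 -> 1.
//
//   bool was_already_set = object_bitmap->AtomicTestAndSet(obj);
//   if (!was_already_set) {
//     // Only the winning thread reaches here, so each newly-marked object is
//     // pushed on to a mark stack (and therefore scanned) exactly once.
//   }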

// Used to mark objects when recursing. Recursion is done by moving
// the finger across the bitmaps in address order and marking child
// objects. Any newly-marked objects whose addresses are lower than
// the finger won't be visited by the bitmap scan, so those objects
// need to be added to the mark stack.
inline void MarkSweep::MarkObject(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj);
  }
}

void MarkSweep::MarkRoot(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj);
  }
}

void MarkSweep::MarkRootParallelCallback(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(root);
}

void MarkSweep::MarkObjectCallback(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNull(root);
}

void MarkSweep::ReMarkObjectVisitor(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNull(root);
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) {
  // See if the root is on any space bitmap.
  if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == NULL) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root;
      if (visitor != NULL) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

// Marks all objects in the root set.
void MarkSweep::MarkRoots() {
  timings_.StartSplit("MarkRoots");
  Runtime::Current()->VisitNonConcurrentRoots(MarkObjectCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkNonThreadRoots() {
  timings_.StartSplit("MarkNonThreadRoots");
  Runtime::Current()->VisitNonThreadRoots(MarkObjectCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkConcurrentRoots() {
  timings_.StartSplit("MarkConcurrentRoots");
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(MarkObjectCallback, this, false, true);
  timings_.EndSplit();
}

void MarkSweep::CheckObject(const Object* obj) {
  DCHECK(obj != NULL);
  VisitObjectReferences(obj, [this](const Object* obj, const Object* ref, MemberOffset offset,
                                    bool is_static) NO_THREAD_SAFETY_ANALYSIS {
    Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
    CheckReference(obj, ref, offset, is_static);
  });
}

void MarkSweep::VerifyImageRootVisitor(Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  DCHECK(mark_sweep->heap_->GetMarkBitmap()->Test(root));
  mark_sweep->CheckObject(root);
}

void MarkSweep::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
  CHECK(space->IsDlMallocSpace());
  space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = alloc_space->mark_bitmap_.release();
  GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
  alloc_space->temp_bitmap_.reset(mark_bitmap);
  alloc_space->mark_bitmap_.reset(live_bitmap);
}
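
// Before/after sketch of the swap above (L = live bitmap, M = mark bitmap;
// illustrative only):
//
//   before: live_bitmap_ == L, mark_bitmap_ == M, temp_bitmap_ == null
//   after:  live_bitmap_ == L, mark_bitmap_ == L, temp_bitmap_ == M
//
// Live and mark now alias the same bitmap, so every live object in the space is
// trivially "marked" and nothing in it can be swept; the parked bitmap M is
// presumably put back later by UnBindBitmaps().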
606
Mathieu Chartier02b6a782012-10-26 13:51:26 -0700607class ScanObjectVisitor {
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700608 public:
Mathieu Chartier94c32c52013-08-09 11:14:04 -0700609 explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
610 : mark_sweep_(mark_sweep) {}
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700611
  // TODO: Fixme when annotalysis works with visitors.
  void operator()(const Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

template <bool kUseFinger = false>
class MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
                const Object** mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != NULL);
      // TODO: Check performance?
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  static const size_t kMaxSize = 1 * KB;

 protected:
  class ScanObjectParallelVisitor {
   public:
    explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
        : chunk_task_(chunk_task) {}

    void operator()(const Object* obj) const {
      MarkSweep* mark_sweep = chunk_task_->mark_sweep_;
      mark_sweep->ScanObjectVisit(obj,
          [mark_sweep, this](const Object* /* obj */, const Object* ref,
                             const MemberOffset& /* offset */, bool /* is_static */) ALWAYS_INLINE {
        if (ref != nullptr && mark_sweep->MarkObjectParallel(ref)) {
          if (kUseFinger) {
            android_memory_barrier();
            if (reinterpret_cast<uintptr_t>(ref) >=
                static_cast<uintptr_t>(mark_sweep->atomic_finger_)) {
              return;
            }
          }
          chunk_task_->MarkStackPush(ref);
        }
      });
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread local mark stack for this task.
  const Object* mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

  void MarkStackPush(const Object* obj) ALWAYS_INLINE {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK(mark_stack_pos_ < kMaxSize);
    mark_stack_[mark_stack_pos_++] = obj;
  }
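
  // Worked example of the overflow split above (kMaxSize == 1 KB, i.e. 1024
  // entries): when a push finds the stack full, mark_stack_pos_ drops to 512,
  // the upper half [512, 1024) is copied into a new MarkStackTask for the pool,
  // and the incoming object lands in slot 512 of this now half-empty stack:
  //
  //   mark_stack_pos_ = 512;
  //   new MarkStackTask(pool, ms, /* size */ 512, /* data */ mark_stack_ + 512);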

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects
  virtual void Run(Thread* self) {
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<const Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      const Object* obj = NULL;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          const Object* obj = mark_stack_[--mark_stack_pos_];
          DCHECK(obj != NULL);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_];
      }
      DCHECK(obj != NULL);
      visitor(obj);
    }
  }
};
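
// A sketch of the prefetch pipeline in MarkStackTask::Run (behavior unchanged,
// illustrative only): instead of popping and scanning one object at a time, the
// loop keeps a small FIFO of up to kFifoSize objects whose cache lines are
// already on their way in.
//
//   pop obj -> __builtin_prefetch(obj) -> [ fifo: o1 o2 o3 o4 ] -> visitor(front)
//
// By the time an object reaches the front of the FIFO, its header is likely in
// cache, hiding part of the memory latency of the scan.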

class CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, accounting::SpaceBitmap* bitmap,
               byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
               const Object** mark_stack_obj)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age) {
  }

 protected:
  accounting::SpaceBitmap* const bitmap_;
  byte* const begin_;
  byte* const end_;
  const byte minimum_age_;

  virtual void Finalize() {
    delete this;
  }

  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    size_t cards_scanned = card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_);
    mark_sweep_->cards_scanned_.fetch_add(cards_scanned);
    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
               << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

size_t MarkSweep::GetThreadCount(bool paused) const {
  if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
    return 0;
  }
  if (paused) {
    return heap_->GetParallelGCThreadCount() + 1;
  } else {
    return heap_->GetConcGCThreadCount() + 1;
  }
}

void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  size_t thread_count = GetThreadCount(paused);
  // The parallel version with only one thread is faster for card scanning. TODO: fix.
  if (kParallelCardScan && thread_count > 0) {
    Thread* self = Thread::Current();
    // Can't have a different split for each space since multiple spaces can have their cards being
    // scanned at the same time.
    timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects");
    // Try to take some of the mark stack since we can pass this off to the worker tasks.
    const Object** mark_stack_begin = const_cast<const Object**>(mark_stack_->Begin());
    const Object** mark_stack_end = const_cast<const Object**>(mark_stack_->End());
    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
    // Estimated number of work tasks we will create.
    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
    DCHECK_NE(mark_stack_tasks, 0U);
    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                             mark_stack_size / mark_stack_tasks + 1);
    size_t ref_card_count = 0;
    cards_scanned_ = 0;
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      byte* card_begin = space->Begin();
      byte* card_end = space->End();
      // Calculate how many bytes of heap we will scan.
      const size_t address_range = card_end - card_begin;
      // Calculate how much address range each task gets.
      const size_t card_delta = RoundUp(address_range / thread_count + 1,
                                        accounting::CardTable::kCardSize);
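      // Worked example with illustrative numbers: for a 64 MiB space and
      // thread_count == 4, address_range is 64 MiB, so card_delta is
      // RoundUp(16 MiB + 1, kCardSize) and the loop below queues four
      // CardScanTasks covering roughly 16 MiB of address range each.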
      // Create the worker tasks for this space.
      while (card_begin != card_end) {
        // Add a range of cards.
        size_t addr_remaining = card_end - card_begin;
        size_t card_increment = std::min(card_delta, addr_remaining);
        // Take from the back of the mark stack.
        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
        mark_stack_end -= mark_stack_increment;
        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
        DCHECK_EQ(mark_stack_end, mark_stack_->End());
        // Add the new task to the thread pool.
        auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
                                      card_begin + card_increment, minimum_age,
                                      mark_stack_increment, mark_stack_end);
        thread_pool->AddTask(self, task);
        card_begin += card_increment;
      }

      if (paused && kIsDebugBuild) {
        // Make sure we don't miss scanning any cards.
        size_t scanned_cards = card_table->Scan(space->GetMarkBitmap(), space->Begin(),
                                                space->End(), VoidFunctor(), minimum_age);
        VLOG(heap) << "Scanning space cards " << reinterpret_cast<void*>(space->Begin()) << " - "
                   << reinterpret_cast<void*>(space->End()) << " = " << scanned_cards;
        ref_card_count += scanned_cards;
      }
    }

    thread_pool->SetMaxActiveWorkers(thread_count - 1);
    thread_pool->StartWorkers(self);
    thread_pool->Wait(self, true, true);
    thread_pool->StopWorkers(self);
    if (paused) {
      DCHECK_EQ(ref_card_count, static_cast<size_t>(cards_scanned_.load()));
    }
    timings_.EndSplit();
  } else {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      // Image spaces are handled properly since live == marked for them.
      switch (space->GetGcRetentionPolicy()) {
        case space::kGcRetentionPolicyNeverCollect:
          timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
              "ScanGrayImageSpaceObjects");
          break;
        case space::kGcRetentionPolicyFullCollect:
          timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
              "ScanGrayZygoteSpaceObjects");
          break;
        case space::kGcRetentionPolicyAlwaysCollect:
          timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
              "ScanGrayAllocSpaceObjects");
          break;
      }
      ScanObjectVisitor visitor(this);
      card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, minimum_age);
      timings_.EndSplit();
    }
  }
}

void MarkSweep::VerifyImageRoots() {
  // VerifyImageRoots ensures that all the references inside the image space point
  // to objects which are either in the image space or marked objects in the alloc
  // space.
  timings_.StartSplit("VerifyImageRoots");
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsImageSpace()) {
      space::ImageSpace* image_space = space->AsImageSpace();
      uintptr_t begin = reinterpret_cast<uintptr_t>(image_space->Begin());
      uintptr_t end = reinterpret_cast<uintptr_t>(image_space->End());
      accounting::SpaceBitmap* live_bitmap = image_space->GetLiveBitmap();
      DCHECK(live_bitmap != NULL);
      live_bitmap->VisitMarkedRange(begin, end, [this](const Object* obj) {
        if (kCheckLocks) {
          Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
        }
        DCHECK(obj != NULL);
        CheckObject(obj);
      });
    }
  }
  timings_.EndSplit();
}

class RecursiveMarkTask : public MarkStackTask<false> {
 public:
  RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
                    accounting::SpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
      : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL),
        bitmap_(bitmap),
        begin_(begin),
        end_(end) {
  }

 protected:
  accounting::SpaceBitmap* const bitmap_;
  const uintptr_t begin_;
  const uintptr_t end_;

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects
  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    bitmap_->VisitMarkedRange(begin_, end_, visitor);
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  base::TimingLogger::ScopedSplit split("RecursiveMark", &timings_);
  // RecursiveMark will build the lists of known instances of the Reference classes.
  // See DelayReferenceReferent for details.
  CHECK(soft_reference_list_ == NULL);
  CHECK(weak_reference_list_ == NULL);
  CHECK(finalizer_reference_list_ == NULL);
  CHECK(phantom_reference_list_ == NULL);
  CHECK(cleared_reference_list_ == NULL);

  if (kUseRecursiveMark) {
    const bool partial = GetGcType() == kGcTypePartial;
    ScanObjectVisitor scan_visitor(this);
    auto* self = Thread::Current();
    ThreadPool* thread_pool = heap_->GetThreadPool();
    size_t thread_count = GetThreadCount(false);
    const bool parallel = kParallelRecursiveMark && thread_count > 1;
    mark_stack_->Reset();
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_mark_bitmap_ = space->GetMarkBitmap();
        if (current_mark_bitmap_ == NULL) {
          GetHeap()->DumpSpaces();
          LOG(FATAL) << "invalid bitmap";
        }
        if (parallel) {
          // We will use the mark stack in the future.
          // CHECK(mark_stack_->IsEmpty());
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          atomic_finger_ = static_cast<int32_t>(0xFFFFFFFF);

          // Create a few worker tasks.
          const size_t n = thread_count * 2;
          while (begin != end) {
            uintptr_t start = begin;
            uintptr_t delta = (end - begin) / n;
            delta = RoundUp(delta, KB);
            if (delta < 16 * KB) delta = end - begin;
            begin += delta;
            auto* task = new RecursiveMarkTask(thread_pool, this, current_mark_bitmap_, start,
                                               begin);
            thread_pool->AddTask(self, task);
          }
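
          // Worked example of the split above (illustrative numbers): for a
          // 32 MiB space and thread_count == 4, n == 8, so the first task gets
          // RoundUp(32 MiB / 8, KB) == 4 MiB, the next (28 MiB / 8) == 3.5 MiB,
          // and so on; ranges shrink geometrically until a remainder smaller
          // than 16 KiB is folded into one final task.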
          thread_pool->SetMaxActiveWorkers(thread_count - 1);
          thread_pool->StartWorkers(self);
          thread_pool->Wait(self, true, true);
          thread_pool->StopWorkers(self);
        } else {
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
        }
      }
    }
  }
  ProcessMarkStack(false);
}

bool MarkSweep::IsMarkedCallback(const Object* object, void* arg) {
  return reinterpret_cast<MarkSweep*>(arg)->IsMarked(object);
}

void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
  ScanGrayObjects(paused, minimum_age);
  ProcessMarkStack(paused);
}

void MarkSweep::ReMarkRoots() {
  timings_.StartSplit("ReMarkRoots");
  Runtime::Current()->VisitRoots(ReMarkObjectVisitor, this, true, true);
  timings_.EndSplit();
}

void MarkSweep::SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg) {
  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
  WriterMutexLock mu(Thread::Current(), vm->weak_globals_lock);
  for (const Object** entry : vm->weak_globals) {
    if (!is_marked(*entry, arg)) {
      *entry = kClearedJniWeakGlobal;
    }
  }
}

struct ArrayMarkedCheck {
  accounting::ObjectStack* live_stack;
  MarkSweep* mark_sweep;
};

// Either marked or not live.
bool MarkSweep::IsMarkedArrayCallback(const Object* object, void* arg) {
  ArrayMarkedCheck* array_check = reinterpret_cast<ArrayMarkedCheck*>(arg);
  if (array_check->mark_sweep->IsMarked(object)) {
    return true;
  }
  accounting::ObjectStack* live_stack = array_check->live_stack;
  if (std::find(live_stack->Begin(), live_stack->End(), object) == live_stack->End()) {
    return true;
  }
  return false;
}
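
// Truth table for the combined predicate above (returning true keeps the weak
// reference; returning false lets the caller clear it):
//
//   IsMarked | on live stack | result
//     yes    |      -        | true  (reachable, keep)
//     no     |      no       | true  (not live, so treated as marked; nothing to clear)
//     no     |      yes      | false (live but unmarked, clear the reference)
//
// This matches the (IsMarked || !IsLive) form described in SweepSystemWeaksArray.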

void MarkSweep::SweepSystemWeaksArray(accounting::ObjectStack* allocations) {
  Runtime* runtime = Runtime::Current();
  // The callbacks check
  // !is_marked where is_marked is the callback but we want
  // !IsMarked && IsLive
  // So compute !(!IsMarked && IsLive) which is equal to (IsMarked || !IsLive).
  // Or for swapped (IsLive || !IsMarked).

  timings_.StartSplit("SweepSystemWeaksArray");
  ArrayMarkedCheck visitor;
  visitor.live_stack = allocations;
  visitor.mark_sweep = this;
  runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedArrayCallback, &visitor);
  runtime->GetMonitorList()->SweepMonitorList(IsMarkedArrayCallback, &visitor);
  SweepJniWeakGlobals(IsMarkedArrayCallback, &visitor);
  timings_.EndSplit();
}

void MarkSweep::SweepSystemWeaks() {
  Runtime* runtime = Runtime::Current();
  // The callbacks check
  // !is_marked where is_marked is the callback but we want
  // !IsMarked && IsLive
  // So compute !(!IsMarked && IsLive) which is equal to (IsMarked || !IsLive).
  // Or for swapped (IsLive || !IsMarked).
  timings_.StartSplit("SweepSystemWeaks");
  runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedCallback, this);
  runtime->GetMonitorList()->SweepMonitorList(IsMarkedCallback, this);
  SweepJniWeakGlobals(IsMarkedCallback, this);
  timings_.EndSplit();
}

bool MarkSweep::VerifyIsLiveCallback(const Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return "marked".
1067 return true;
1068}
1069
1070void MarkSweep::VerifyIsLive(const Object* obj) {
1071 Heap* heap = GetHeap();
1072 if (!heap->GetLiveBitmap()->Test(obj)) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001073 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001074 if (!large_object_space->GetLiveObjects()->Test(obj)) {
1075 if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
1076 heap->allocation_stack_->End()) {
1077 // Object not found!
1078 heap->DumpSpaces();
1079 LOG(FATAL) << "Found dead object " << obj;
1080 }
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001081 }
1082 }
1083}
1084
1085void MarkSweep::VerifySystemWeaks() {
1086 Runtime* runtime = Runtime::Current();
1087 // Verify system weaks, uses a special IsMarked callback which always returns true.
1088 runtime->GetInternTable()->SweepInternTableWeaks(VerifyIsLiveCallback, this);
1089 runtime->GetMonitorList()->SweepMonitorList(VerifyIsLiveCallback, this);
1090
1091 JavaVMExt* vm = runtime->GetJavaVM();
Ian Rogersb8a0b942013-08-20 18:09:52 -07001092 ReaderMutexLock mu(Thread::Current(), vm->weak_globals_lock);
Mathieu Chartier02e25112013-08-14 16:14:24 -07001093 for (const Object** entry : vm->weak_globals) {
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001094 VerifyIsLive(*entry);
1095 }
1096}
1097
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001098struct SweepCallbackContext {
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001099 MarkSweep* mark_sweep;
Ian Rogers1d54e732013-05-02 21:10:01 -07001100 space::AllocSpace* space;
Ian Rogers50b35e22012-10-04 10:09:15 -07001101 Thread* self;
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001102};
1103
Mathieu Chartier0e4627e2012-10-23 16:13:36 -07001104class CheckpointMarkThreadRoots : public Closure {
Mathieu Chartier858f1c52012-10-17 17:45:55 -07001105 public:
Brian Carlstrom93ba8932013-07-17 21:31:49 -07001106 explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}
Mathieu Chartier858f1c52012-10-17 17:45:55 -07001107
1108 virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
Mathieu Chartier3f966702013-09-04 16:50:05 -07001109 ATRACE_BEGIN("Marking thread roots");
Mathieu Chartier858f1c52012-10-17 17:45:55 -07001110 // Note: self is not necessarily equal to thread since thread may be suspended.
1111 Thread* self = Thread::Current();
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001112 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
1113 << thread->GetState() << " thread " << thread << " self " << self;
Mathieu Chartierac86a7c2012-11-12 15:03:16 -08001114 thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
Mathieu Chartier3f966702013-09-04 16:50:05 -07001115 ATRACE_END();
Mathieu Chartier858f1c52012-10-17 17:45:55 -07001116 mark_sweep_->GetBarrier().Pass(self);
1117 }
1118
1119 private:
1120 MarkSweep* mark_sweep_;
1121};
1122
Ian Rogers1d54e732013-05-02 21:10:01 -07001123void MarkSweep::MarkRootsCheckpoint(Thread* self) {
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001124 CheckpointMarkThreadRoots check_point(this);
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001125 timings_.StartSplit("MarkRootsCheckpoint");
Mathieu Chartier858f1c52012-10-17 17:45:55 -07001126 ThreadList* thread_list = Runtime::Current()->GetThreadList();
Ian Rogers1d54e732013-05-02 21:10:01 -07001127 // Request that the checkpoint be run on all threads, returning a count of the threads
1128 // that must run through the barrier, including self.
1129 size_t barrier_count = thread_list->RunCheckpoint(&check_point);
1130 // Release locks then wait for all mutator threads to pass the barrier.
1131 // TODO: optimize to not release locks when there are no threads to wait for.
1132 Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
1133 Locks::mutator_lock_->SharedUnlock(self);
1134 ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun);
1135 CHECK_EQ(old_state, kWaitingPerformingGc);
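  // Wait here until every checkpointed thread has called Pass on the gc barrier.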
1136 gc_barrier_->Increment(self, barrier_count);
1137 self->SetState(kWaitingPerformingGc);
1138 Locks::mutator_lock_->SharedLock(self);
1139 Locks::heap_bitmap_lock_->ExclusiveLock(self);
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001140 timings_.EndSplit();
Mathieu Chartier858f1c52012-10-17 17:45:55 -07001141}
1142
Ian Rogers30fab402012-01-23 15:43:46 -08001143void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001144 SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001145 MarkSweep* mark_sweep = context->mark_sweep;
1146 Heap* heap = mark_sweep->GetHeap();
Ian Rogers1d54e732013-05-02 21:10:01 -07001147 space::AllocSpace* space = context->space;
Ian Rogers50b35e22012-10-04 10:09:15 -07001148 Thread* self = context->self;
1149 Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
Ian Rogers5d76c432011-10-31 21:42:49 -07001150 // Use a bulk free that merges consecutive objects before freeing, or free per object?
1151 // Documentation suggests better free performance with merging, but this may come at the
1152 // expense of allocation performance.
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001153 size_t freed_objects = num_ptrs;
1154 // AllocSpace::FreeList clears the values in ptrs, so perform this after clearing the live bits.
1155 size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001156 heap->RecordFree(freed_objects, freed_bytes);
Mathieu Chartier4b95e8f2013-07-15 16:32:50 -07001157 mark_sweep->freed_objects_.fetch_add(freed_objects);
1158 mark_sweep->freed_bytes_.fetch_add(freed_bytes);
Carl Shapiro58551df2011-07-24 03:09:51 -07001159}
1160
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001161void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001162 SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
Ian Rogers50b35e22012-10-04 10:09:15 -07001163 Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001164 Heap* heap = context->mark_sweep->GetHeap();
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001165 // We don't free any actual memory to avoid dirtying the shared zygote pages.
1166 for (size_t i = 0; i < num_ptrs; ++i) {
1167 Object* obj = static_cast<Object*>(ptrs[i]);
1168 heap->GetLiveBitmap()->Clear(obj);
1169 heap->GetCardTable()->MarkCard(obj);
1170 }
1171}
1172
Ian Rogers1d54e732013-05-02 21:10:01 -07001173void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001174 space::DlMallocSpace* space = heap_->GetAllocSpace();
Elliott Hughes2da50362011-10-10 16:57:08 -07001175
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001176 timings_.StartSplit("SweepArray");
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001177 // Newly allocated objects MUST be in the alloc space and those are the only objects which we are
1178 // going to free.
Ian Rogers1d54e732013-05-02 21:10:01 -07001179 accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
1180 accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
1181 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
1182 accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
1183 accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
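  // If the caller pre-swapped the heap bitmaps, swap the local pointers back so that
  // mark_bitmap/large_mark_objects below always denote the objects that survived marking.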
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001184 if (swap_bitmaps) {
1185 std::swap(live_bitmap, mark_bitmap);
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001186 std::swap(large_live_objects, large_mark_objects);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001187 }
1188
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001189 size_t freed_bytes = 0;
1190 size_t freed_large_object_bytes = 0;
Hiroshi Yamauchib22a4512013-08-13 15:03:22 -07001191 size_t freed_objects = 0;
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001192 size_t freed_large_objects = 0;
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001193 size_t count = allocations->Size();
Mathieu Chartierd8195f12012-10-05 12:21:28 -07001194 Object** objects = const_cast<Object**>(allocations->Begin());
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001195 Object** out = objects;
Hiroshi Yamauchib22a4512013-08-13 15:03:22 -07001196 Object** objects_to_chunk_free = out;
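  // Invariant: [objects_to_chunk_free, out) holds dead objects that have been identified
  // but not yet passed to FreeList; the range is flushed once it reaches
  // kSweepArrayChunkFreeSize entries, and once more after the loop.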
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001197
1198 // Empty the allocation stack.
Ian Rogers50b35e22012-10-04 10:09:15 -07001199 Thread* self = Thread::Current();
Brian Carlstrom02c8cc62013-07-18 15:54:44 -07001200 for (size_t i = 0; i < count; ++i) {
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001201 Object* obj = objects[i];
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001202 // The allocation stack should only contain objects in the AllocSpace/LargeObjectSpace.
1203 if (LIKELY(mark_bitmap->HasAddress(obj))) {
1204 if (!mark_bitmap->Test(obj)) {
1205 // Don't bother un-marking since we clear the mark bitmap anyway.
1206 *(out++) = obj;
Hiroshi Yamauchib22a4512013-08-13 15:03:22 -07001207 // Free objects in chunks.
1208 DCHECK_GE(out, objects_to_chunk_free);
1209 DCHECK_LE(static_cast<size_t>(out - objects_to_chunk_free), kSweepArrayChunkFreeSize);
1210 if (static_cast<size_t>(out - objects_to_chunk_free) == kSweepArrayChunkFreeSize) {
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001211 timings_.StartSplit("FreeList");
Hiroshi Yamauchib22a4512013-08-13 15:03:22 -07001212 size_t chunk_freed_objects = out - objects_to_chunk_free;
1213 freed_objects += chunk_freed_objects;
1214 freed_bytes += space->FreeList(self, chunk_freed_objects, objects_to_chunk_free);
1215 objects_to_chunk_free = out;
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001216 timings_.EndSplit();
Hiroshi Yamauchib22a4512013-08-13 15:03:22 -07001217 }
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001218 }
1219 } else if (!large_mark_objects->Test(obj)) {
1220 ++freed_large_objects;
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001221 freed_large_object_bytes += large_object_space->Free(self, obj);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001222 }
1223 }
Hiroshi Yamauchib22a4512013-08-13 15:03:22 -07001224 // Free the remaining objects in chunks.
1225 DCHECK_GE(out, objects_to_chunk_free);
1226 DCHECK_LE(static_cast<size_t>(out - objects_to_chunk_free), kSweepArrayChunkFreeSize);
1227 if (out - objects_to_chunk_free > 0) {
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001228 timings_.StartSplit("FreeList");
Hiroshi Yamauchib22a4512013-08-13 15:03:22 -07001229 size_t chunk_freed_objects = out - objects_to_chunk_free;
1230 freed_objects += chunk_freed_objects;
1231 freed_bytes += space->FreeList(self, chunk_freed_objects, objects_to_chunk_free);
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001232 timings_.EndSplit();
Hiroshi Yamauchib22a4512013-08-13 15:03:22 -07001233 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001234 CHECK_EQ(count, allocations->Size());
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001235 timings_.EndSplit();
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001236
Hiroshi Yamauchib22a4512013-08-13 15:03:22 -07001237 timings_.StartSplit("RecordFree");
Mathieu Chartier40e978b2012-09-07 11:38:36 -07001238 VLOG(heap) << "Freed " << freed_objects << "/" << count
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001239 << " objects with size " << PrettySize(freed_bytes);
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001240 heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes + freed_large_object_bytes);
Mathieu Chartier4b95e8f2013-07-15 16:32:50 -07001241 freed_objects_.fetch_add(freed_objects);
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001242 freed_large_objects_.fetch_add(freed_large_objects);
Mathieu Chartier4b95e8f2013-07-15 16:32:50 -07001243 freed_bytes_.fetch_add(freed_bytes);
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001244 freed_large_object_bytes_.fetch_add(freed_large_object_bytes);
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001245 timings_.EndSplit();
Ian Rogers1d54e732013-05-02 21:10:01 -07001246
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001247 timings_.StartSplit("ResetStack");
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001248 allocations->Reset();
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001249 timings_.EndSplit();
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001250}
1251
Ian Rogers1d54e732013-05-02 21:10:01 -07001252void MarkSweep::Sweep(bool swap_bitmaps) {
Mathieu Chartierb43b7d42012-06-19 13:15:09 -07001253 DCHECK(mark_stack_->IsEmpty());
Anwar Ghuloum46543222013-08-12 09:28:42 -07001254 base::TimingLogger::ScopedSplit split("Sweep", &timings_);  // Named so the split spans the whole method.
Mathieu Chartierb43b7d42012-06-19 13:15:09 -07001255
Ian Rogers1d54e732013-05-02 21:10:01 -07001256 const bool partial = (GetGcType() == kGcTypePartial);
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001257 SweepCallbackContext scc;
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001258 scc.mark_sweep = this;
Ian Rogers50b35e22012-10-04 10:09:15 -07001259 scc.self = Thread::Current();
Mathieu Chartier02e25112013-08-14 16:14:24 -07001260 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001261 // We always sweep always-collect spaces.
1262 bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect);
1263 if (!partial && !sweep_space) {
1264 // We sweep full-collect spaces when the GC isn't a partial GC (i.e. it is a full GC).
1265 sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
1266 }
1267 if (sweep_space) {
Mathieu Chartier720ef762013-08-17 14:46:54 -07001268 uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
1269 uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
Ian Rogers1d54e732013-05-02 21:10:01 -07001270 scc.space = space->AsDlMallocSpace();
1271 accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
1272 accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001273 if (swap_bitmaps) {
1274 std::swap(live_bitmap, mark_bitmap);
1275 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001276 if (!space->IsZygoteSpace()) {
Anwar Ghuloum46543222013-08-12 09:28:42 -07001277 base::TimingLogger::ScopedSplit split("SweepAllocSpace", &timings_);
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001278 // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
Ian Rogers1d54e732013-05-02 21:10:01 -07001279 accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
1280 &SweepCallback, reinterpret_cast<void*>(&scc));
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001281 } else {
Anwar Ghuloum46543222013-08-12 09:28:42 -07001282 base::TimingLogger::ScopedSplit split("SweepZygote", &timings_);
Ian Rogers1d54e732013-05-02 21:10:01 -07001283 // Zygote sweep takes care of dirtying cards and clearing live bits; it does not free actual
1284 // memory.
1285 accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
1286 &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001287 }
Carl Shapiro58551df2011-07-24 03:09:51 -07001288 }
1289 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001290
1291 SweepLargeObjects(swap_bitmaps);
Carl Shapiro58551df2011-07-24 03:09:51 -07001292}
1293
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001294void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
Anwar Ghuloum46543222013-08-12 09:28:42 -07001295 base::TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);  // Named so the split spans the whole method.
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001296 // Sweep large objects.
Ian Rogers1d54e732013-05-02 21:10:01 -07001297 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
1298 accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
1299 accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001300 if (swap_bitmaps) {
1301 std::swap(large_live_objects, large_mark_objects);
1302 }
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001303 // O(n*log(n)) but hopefully there are not too many large objects.
1304 size_t freed_objects = 0;
Mathieu Chartier2fde5332012-09-14 14:51:54 -07001305 size_t freed_bytes = 0;
Ian Rogers50b35e22012-10-04 10:09:15 -07001306 Thread* self = Thread::Current();
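  // Any object present in the live set but absent from the mark set is unreachable and
  // can be freed immediately.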
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001307 for (const Object* obj : large_live_objects->GetObjects()) {
Mathieu Chartier02e25112013-08-14 16:14:24 -07001308 if (!large_mark_objects->Test(obj)) {
1309 freed_bytes += large_object_space->Free(self, const_cast<Object*>(obj));
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001310 ++freed_objects;
1311 }
1312 }
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001313 freed_large_objects_.fetch_add(freed_objects);
1314 freed_large_object_bytes_.fetch_add(freed_bytes);
Mathieu Chartier2fde5332012-09-14 14:51:54 -07001315 GetHeap()->RecordFree(freed_objects, freed_bytes);
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001316}
1317
Mathieu Chartierb43b7d42012-06-19 13:15:09 -07001318void MarkSweep::CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) {
Mathieu Chartier02e25112013-08-14 16:14:24 -07001319 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001320 if (space->IsDlMallocSpace() && space->Contains(ref)) {
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001321 DCHECK(IsMarked(obj));
Mathieu Chartierb43b7d42012-06-19 13:15:09 -07001322
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001323 bool is_marked = IsMarked(ref);
1324 if (!is_marked) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001325 LOG(INFO) << *space;
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001326 LOG(WARNING) << (is_static ? "Static ref'" : "Instance ref'") << PrettyTypeOf(ref)
1327 << "' (" << reinterpret_cast<const void*>(ref) << ") in '" << PrettyTypeOf(obj)
1328 << "' (" << reinterpret_cast<const void*>(obj) << ") at offset "
1329 << reinterpret_cast<void*>(offset.Int32Value()) << " wasn't marked";
Mathieu Chartier262e5ff2012-06-01 17:35:38 -07001330
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001331 const Class* klass = is_static ? obj->AsClass() : obj->GetClass();
1332 DCHECK(klass != NULL);
Brian Carlstromea46f952013-07-30 01:26:50 -07001333 const ObjectArray<ArtField>* fields = is_static ? klass->GetSFields() : klass->GetIFields();
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001334 DCHECK(fields != NULL);
1335 bool found = false;
1336 for (int32_t i = 0; i < fields->GetLength(); ++i) {
Brian Carlstromea46f952013-07-30 01:26:50 -07001337 const ArtField* cur = fields->Get(i);
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001338 if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
1339 LOG(WARNING) << "Field referencing the alloc space was " << PrettyField(cur);
1340 found = true;
1341 break;
1342 }
1343 }
1344 if (!found) {
1345 LOG(WARNING) << "Could not find field in object alloc space with offset " << offset.Int32Value();
1346 }
Mathieu Chartier262e5ff2012-06-01 17:35:38 -07001347
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001348 bool obj_marked = heap_->GetCardTable()->IsDirty(obj);
1349 if (!obj_marked) {
1350 LOG(WARNING) << "Object '" << PrettyTypeOf(obj) << "' "
1351 << "(" << reinterpret_cast<const void*>(obj) << ") contains references to "
1352 << "the alloc space, but wasn't card marked";
Mathieu Chartier262e5ff2012-06-01 17:35:38 -07001353 }
1354 }
Ian Rogers5d76c432011-10-31 21:42:49 -07001355 }
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001356 break;
Ian Rogers5d76c432011-10-31 21:42:49 -07001357 }
1358}
1359
Carl Shapiro69759ea2011-07-21 18:13:35 -07001360// Process the "referent" field in a java.lang.ref.Reference. If the
1361// referent has not yet been marked, put it on the appropriate list in
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001362// the heap for later processing.
1363void MarkSweep::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
1364 DCHECK(klass != nullptr);
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001365 DCHECK(klass->IsReferenceClass());
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001366 DCHECK(obj != NULL);
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001367 Object* referent = heap_->GetReferenceReferent(obj);
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001368 if (referent != NULL && !IsMarked(referent)) {
1369 if (kCountJavaLangRefs) {
1370 ++reference_count_;
Carl Shapiro69759ea2011-07-21 18:13:35 -07001371 }
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001372 Thread* self = Thread::Current();
1373 // TODO: Remove these locks, and use atomic stacks for storing references?
Mathieu Chartierb4ea4de2013-09-18 09:58:29 -07001374 // We need to check that the references haven't already been enqueued since we can end up
1375 // scanning the same reference multiple times due to dirty cards.
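  // Each reference class has its own lock and pending list; the IsEnqueued checks below
  // keep a reference from being put on a list twice.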
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001376 if (klass->IsSoftReferenceClass()) {
1377 MutexLock mu(self, *heap_->GetSoftRefQueueLock());
Mathieu Chartierb4ea4de2013-09-18 09:58:29 -07001378 if (!heap_->IsEnqueued(obj)) {
1379 heap_->EnqueuePendingReference(obj, &soft_reference_list_);
1380 }
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001381 } else if (klass->IsWeakReferenceClass()) {
1382 MutexLock mu(self, *heap_->GetWeakRefQueueLock());
Mathieu Chartierb4ea4de2013-09-18 09:58:29 -07001383 if (!heap_->IsEnqueued(obj)) {
1384 heap_->EnqueuePendingReference(obj, &weak_reference_list_);
1385 }
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001386 } else if (klass->IsFinalizerReferenceClass()) {
1387 MutexLock mu(self, *heap_->GetFinalizerRefQueueLock());
Mathieu Chartierb4ea4de2013-09-18 09:58:29 -07001388 if (!heap_->IsEnqueued(obj)) {
1389 heap_->EnqueuePendingReference(obj, &finalizer_reference_list_);
1390 }
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001391 } else if (klass->IsPhantomReferenceClass()) {
1392 MutexLock mu(self, *heap_->GetPhantomRefQueueLock());
Mathieu Chartierb4ea4de2013-09-18 09:58:29 -07001393 if (!heap_->IsEnqueued(obj)) {
1394 heap_->EnqueuePendingReference(obj, &phantom_reference_list_);
1395 }
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001396 } else {
1397 LOG(FATAL) << "Invalid reference type " << PrettyClass(klass)
1398 << " " << std::hex << klass->GetAccessFlags();
1399 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07001400 }
1401}
1402
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001403void MarkSweep::ScanRoot(const Object* obj) {
1404 ScanObject(obj);
1405}
1406
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001407class MarkObjectVisitor {
1408 public:
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001409 explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {}
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001410
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001411 // TODO: Fix this when annotalysis works with visitors.
Brian Carlstromdf629502013-07-17 22:39:56 -07001412 void operator()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */,
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001413 bool /* is_static */) const ALWAYS_INLINE
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001414 NO_THREAD_SAFETY_ANALYSIS {
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001415 if (kCheckLocks) {
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001416 Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
1417 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
1418 }
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001419 mark_sweep_->MarkObject(ref);
1420 }
1421
1422 private:
1423 MarkSweep* const mark_sweep_;
1424};
1425
Carl Shapiro69759ea2011-07-21 18:13:35 -07001426// Scans an object reference. Determines the type of the reference
1427// and dispatches to a specialized scanning routine.
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001428void MarkSweep::ScanObject(const Object* obj) {
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001429 MarkObjectVisitor visitor(this);
1430 ScanObjectVisit(obj, visitor);
1431}
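// A sketch of how the visitor above is driven (an illustration of existing behavior, not a
// new code path): ScanObjectVisit walks each reference field of obj and invokes
// visitor(obj, ref, field_offset, is_static) for every reference the object holds.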
1432
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001433void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001434 Thread* self = Thread::Current();
1435 ThreadPool* thread_pool = GetHeap()->GetThreadPool();
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001436 const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
1437 static_cast<size_t>(MarkStackTask<false>::kMaxSize));
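  // Aim for roughly one chunk of work per thread, capped at the maximum task size.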
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001438 CHECK_GT(chunk_size, 0U);
1439 // Split the current mark stack up into work tasks.
1440 for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) {
1441 const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
1442 thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta,
1443 const_cast<const mirror::Object**>(it)));
1444 it += delta;
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001445 }
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001446 thread_pool->SetMaxActiveWorkers(thread_count - 1);
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001447 thread_pool->StartWorkers(self);
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001448 thread_pool->Wait(self, true, true);
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001449 thread_pool->StopWorkers(self);
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001450 mark_stack_->Reset();
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001451 CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
Carl Shapiro69759ea2011-07-21 18:13:35 -07001452}
1453
Ian Rogers5d76c432011-10-31 21:42:49 -07001454// Scan anything that's on the mark stack.
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001455void MarkSweep::ProcessMarkStack(bool paused) {
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001456 timings_.StartSplit("ProcessMarkStack");
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001457 size_t thread_count = GetThreadCount(paused);
1458 if (kParallelProcessMarkStack && thread_count > 1 &&
1459 mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
1460 ProcessMarkStackParallel(thread_count);
Mathieu Chartierd8195f12012-10-05 12:21:28 -07001461 } else {
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001462 // TODO: Tune this.
1463 static const size_t kFifoSize = 4;
1464 BoundedFifoPowerOfTwo<const Object*, kFifoSize> prefetch_fifo;
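      // Objects are popped into this small FIFO and prefetched, so that their memory is
      // likely in cache by the time ScanObject processes them.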
1465 for (;;) {
1466 const Object* obj = NULL;
1467 if (kUseMarkStackPrefetch) {
1468 while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
1469 const Object* obj = mark_stack_->PopBack();
1470 DCHECK(obj != NULL);
1471 __builtin_prefetch(obj);
1472 prefetch_fifo.push_back(obj);
1473 }
1474 if (prefetch_fifo.empty()) {
1475 break;
1476 }
1477 obj = prefetch_fifo.front();
1478 prefetch_fifo.pop_front();
1479 } else {
1480 if (mark_stack_->IsEmpty()) {
1481 break;
1482 }
1483 obj = mark_stack_->PopBack();
1484 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001485 DCHECK(obj != NULL);
Mathieu Chartierd8195f12012-10-05 12:21:28 -07001486 ScanObject(obj);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001487 }
1488 }
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001489 timings_.EndSplit();
Carl Shapiro69759ea2011-07-21 18:13:35 -07001490}
1491
Carl Shapiro69759ea2011-07-21 18:13:35 -07001492// Walks the reference list marking any references subject to the
1493// reference clearing policy. References with a black referent are
1494// removed from the list. References with white referents biased
1495// toward saving are blackened and also removed from the list.
1496void MarkSweep::PreserveSomeSoftReferences(Object** list) {
1497 DCHECK(list != NULL);
1498 Object* clear = NULL;
1499 size_t counter = 0;
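  // counter is incremented once per white referent; the (counter & 1) test below preserves
  // roughly every other one.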
Mathieu Chartierb43b7d42012-06-19 13:15:09 -07001500
1501 DCHECK(mark_stack_->IsEmpty());
1502
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001503 timings_.StartSplit("PreserveSomeSoftReferences");
Carl Shapiro69759ea2011-07-21 18:13:35 -07001504 while (*list != NULL) {
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001505 Object* ref = heap_->DequeuePendingReference(list);
1506 Object* referent = heap_->GetReferenceReferent(ref);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001507 if (referent == NULL) {
1508 // Referent was cleared by the user during marking.
1509 continue;
1510 }
1511 bool is_marked = IsMarked(referent);
1512 if (!is_marked && ((++counter) & 1)) {
1513 // Referent is white and biased toward saving, mark it.
1514 MarkObject(referent);
1515 is_marked = true;
1516 }
1517 if (!is_marked) {
1518 // Referent is white, queue it for clearing.
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001519 heap_->EnqueuePendingReference(ref, &clear);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001520 }
1521 }
1522 *list = clear;
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001523 timings_.EndSplit();
1524
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001525 // Restart the mark with the newly black references added to the root set.
1526 ProcessMarkStack(true);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001527}
1528
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001529inline bool MarkSweep::IsMarked(const Object* object) const
1530 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
Mathieu Chartier9642c962013-08-05 17:40:36 -07001531 if (IsImmune(object)) {
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001532 return true;
1533 }
1534 DCHECK(current_mark_bitmap_ != NULL);
1535 if (current_mark_bitmap_->HasAddress(object)) {
1536 return current_mark_bitmap_->Test(object);
1537 }
1538 return heap_->GetMarkBitmap()->Test(object);
1539}
1540
Carl Shapiro69759ea2011-07-21 18:13:35 -07001541 // Unlink the reference list, clearing reference objects with white
1542// referents. Cleared references registered to a reference queue are
1543// scheduled for appending by the heap worker thread.
1544void MarkSweep::ClearWhiteReferences(Object** list) {
1545 DCHECK(list != NULL);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001546 while (*list != NULL) {
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001547 Object* ref = heap_->DequeuePendingReference(list);
1548 Object* referent = heap_->GetReferenceReferent(ref);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001549 if (referent != NULL && !IsMarked(referent)) {
1550 // Referent is white, clear it.
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001551 heap_->ClearReferenceReferent(ref);
1552 if (heap_->IsEnqueuable(ref)) {
1553 heap_->EnqueueReference(ref, &cleared_reference_list_);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001554 }
1555 }
1556 }
1557 DCHECK(*list == NULL);
1558}
1559
1560// Enqueues finalizer references with white referents. White
1561// referents are blackened, moved to the zombie field, and the
1562// referent field is cleared.
1563void MarkSweep::EnqueueFinalizerReferences(Object** list) {
1564 DCHECK(list != NULL);
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001565 timings_.StartSplit("EnqueueFinalizerReferences");
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001566 MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset();
Carl Shapiro69759ea2011-07-21 18:13:35 -07001567 bool has_enqueued = false;
1568 while (*list != NULL) {
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001569 Object* ref = heap_->DequeuePendingReference(list);
1570 Object* referent = heap_->GetReferenceReferent(ref);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001571 if (referent != NULL && !IsMarked(referent)) {
1572 MarkObject(referent);
1573 // If the referent is non-null the reference must be enqueuable.
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001574 DCHECK(heap_->IsEnqueuable(ref));
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001575 ref->SetFieldObject(zombie_offset, referent, false);
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001576 heap_->ClearReferenceReferent(ref);
1577 heap_->EnqueueReference(ref, &cleared_reference_list_);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001578 has_enqueued = true;
1579 }
1580 }
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001581 timings_.EndSplit();
Carl Shapiro69759ea2011-07-21 18:13:35 -07001582 if (has_enqueued) {
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001583 ProcessMarkStack(true);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001584 }
1585 DCHECK(*list == NULL);
1586}
1587
Carl Shapiro58551df2011-07-24 03:09:51 -07001588// Process reference class instances and schedule finalizations.
Carl Shapiro69759ea2011-07-21 18:13:35 -07001589void MarkSweep::ProcessReferences(Object** soft_references, bool clear_soft,
1590 Object** weak_references,
1591 Object** finalizer_references,
1592 Object** phantom_references) {
Mathieu Chartier0f72e412013-09-06 16:40:01 -07001593 CHECK(soft_references != NULL);
1594 CHECK(weak_references != NULL);
1595 CHECK(finalizer_references != NULL);
1596 CHECK(phantom_references != NULL);
1597 CHECK(mark_stack_->IsEmpty());
Carl Shapiro69759ea2011-07-21 18:13:35 -07001598
1599 // Unless we are in the zygote or required to clear soft references
1600 // with white referents, preserve some white referents.
Ian Rogers2945e242012-06-03 14:45:16 -07001601 if (!clear_soft && !Runtime::Current()->IsZygote()) {
Carl Shapiro69759ea2011-07-21 18:13:35 -07001602 PreserveSomeSoftReferences(soft_references);
1603 }
1604
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001605 timings_.StartSplit("ProcessReferences");
Carl Shapiro69759ea2011-07-21 18:13:35 -07001606 // Clear all remaining soft and weak references with white
1607 // referents.
1608 ClearWhiteReferences(soft_references);
1609 ClearWhiteReferences(weak_references);
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001610 timings_.EndSplit();
Carl Shapiro69759ea2011-07-21 18:13:35 -07001611
1612 // Preserve all white objects with finalize methods and schedule
1613 // them for finalization.
1614 EnqueueFinalizerReferences(finalizer_references);
1615
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001616 timings_.StartSplit("ProcessReferences");
Carl Shapiro69759ea2011-07-21 18:13:35 -07001617 // Clear all f-reachable soft and weak references with white
1618 // referents.
1619 ClearWhiteReferences(soft_references);
1620 ClearWhiteReferences(weak_references);
1621
1622 // Clear all phantom references with white referents.
1623 ClearWhiteReferences(phantom_references);
1624
1625 // At this point all reference lists should be empty.
1626 DCHECK(*soft_references == NULL);
1627 DCHECK(*weak_references == NULL);
1628 DCHECK(*finalizer_references == NULL);
1629 DCHECK(*phantom_references == NULL);
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001630 timings_.EndSplit();
Carl Shapiro69759ea2011-07-21 18:13:35 -07001631}
1632
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001633void MarkSweep::UnBindBitmaps() {
Anwar Ghuloum46543222013-08-12 09:28:42 -07001634 base::TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
Mathieu Chartier02e25112013-08-14 16:14:24 -07001635 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001636 if (space->IsDlMallocSpace()) {
1637 space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001638 if (alloc_space->temp_bitmap_.get() != NULL) {
1639 // At this point, the temp_bitmap holds our old mark bitmap.
Ian Rogers1d54e732013-05-02 21:10:01 -07001640 accounting::SpaceBitmap* new_bitmap = alloc_space->temp_bitmap_.release();
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001641 GetHeap()->GetMarkBitmap()->ReplaceBitmap(alloc_space->mark_bitmap_.get(), new_bitmap);
1642 CHECK_EQ(alloc_space->mark_bitmap_.release(), alloc_space->live_bitmap_.get());
1643 alloc_space->mark_bitmap_.reset(new_bitmap);
1644 DCHECK(alloc_space->temp_bitmap_.get() == NULL);
1645 }
1646 }
1647 }
1648}
1649
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001650void MarkSweep::FinishPhase() {
Anwar Ghuloum46543222013-08-12 09:28:42 -07001651 base::TimingLogger::ScopedSplit split("FinishPhase", &timings_);
1652 // Can't enqueue references if we hold the mutator lock.
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001653 Object* cleared_references = GetClearedReferences();
Ian Rogers1d54e732013-05-02 21:10:01 -07001654 Heap* heap = GetHeap();
Anwar Ghuloum46543222013-08-12 09:28:42 -07001655 timings_.NewSplit("EnqueueClearedReferences");
Ian Rogers1d54e732013-05-02 21:10:01 -07001656 heap->EnqueueClearedReferences(&cleared_references);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001657
Anwar Ghuloum46543222013-08-12 09:28:42 -07001658 timings_.NewSplit("PostGcVerification");
Ian Rogers1d54e732013-05-02 21:10:01 -07001659 heap->PostGcVerification(this);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001660
Anwar Ghuloum46543222013-08-12 09:28:42 -07001661 timings_.NewSplit("GrowForUtilization");
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07001662 heap->GrowForUtilization(GetGcType(), GetDurationNs());
Mathieu Chartier65db8802012-11-20 12:36:46 -08001663
Anwar Ghuloum46543222013-08-12 09:28:42 -07001664 timings_.NewSplit("RequestHeapTrim");
Ian Rogers1d54e732013-05-02 21:10:01 -07001665 heap->RequestHeapTrim();
Mathieu Chartier65db8802012-11-20 12:36:46 -08001666
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001667 // Update the cumulative statistics
Ian Rogers1d54e732013-05-02 21:10:01 -07001668 total_time_ns_ += GetDurationNs();
1669 total_paused_time_ns_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(), 0,
1670 std::plus<uint64_t>());
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001671 total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
1672 total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001673
1674 // Ensure that the mark stack is empty.
1675 CHECK(mark_stack_->IsEmpty());
1676
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001677 if (kCountScannedTypes) {
1678 VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
1679 << " other=" << other_count_;
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001680 }
1681
1682 if (kCountTasks) {
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001683 VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001684 }
1685
1686 if (kMeasureOverhead) {
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001687 VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001688 }
1689
1690 if (kProfileLargeObjects) {
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001691 VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001692 }
1693
1694 if (kCountClassesMarked) {
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001695 VLOG(gc) << "Classes marked " << classes_marked_;
1696 }
1697
1698 if (kCountJavaLangRefs) {
1699 VLOG(gc) << "References scanned " << reference_count_;
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001700 }
1701
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001702 // Update the cumulative loggers.
1703 cumulative_timings_.Start();
Anwar Ghuloum6f28d912013-07-24 15:02:53 -07001704 cumulative_timings_.AddLogger(timings_);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001705 cumulative_timings_.End();
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001706
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001707 // Clear all of the spaces' mark bitmaps.
Mathieu Chartier02e25112013-08-14 16:14:24 -07001708 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001709 if (space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001710 space->GetMarkBitmap()->Clear();
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001711 }
1712 }
Mathieu Chartier5301cd22012-05-31 12:11:36 -07001713 mark_stack_->Reset();
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001714
1715 // Reset the marked large objects.
Ian Rogers1d54e732013-05-02 21:10:01 -07001716 space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001717 large_objects->GetMarkObjects()->Clear();
Carl Shapiro69759ea2011-07-21 18:13:35 -07001718}
1719
Ian Rogers1d54e732013-05-02 21:10:01 -07001720} // namespace collector
1721} // namespace gc
Carl Shapiro69759ea2011-07-21 18:13:35 -07001722} // namespace art