/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "monitor.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::ArtField;
using ::art::mirror::Class;
using ::art::mirror::Object;
using ::art::mirror::ObjectArray;

namespace art {
namespace gc {
namespace collector {

// Performance options.
constexpr bool kUseRecursiveMark = false;
constexpr bool kUseMarkStackPrefetch = true;
constexpr size_t kSweepArrayChunkFreeSize = 1024;

// Parallelism options.
constexpr bool kParallelCardScan = true;
constexpr bool kParallelRecursiveMark = true;
// Don't attempt to parallelize mark stack processing unless the mark stack is at least n
// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc. Not
// having this can add overhead in ProcessReferences since we may end up doing many calls of
// ProcessMarkStack with very small mark stacks.
constexpr size_t kMinimumParallelMarkStackSize = 128;
constexpr bool kParallelProcessMarkStack = true;

// Profiling and information flags.
constexpr bool kCountClassesMarked = false;
constexpr bool kProfileLargeObjects = false;
constexpr bool kMeasureOverhead = false;
constexpr bool kCountTasks = false;
constexpr bool kCountJavaLangRefs = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
constexpr bool kCheckLocks = kDebugLocking;

void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    BindLiveToMarkBitmap(space);
  }

  // Add the space to the immune region.
  // TODO: Use space limits instead of current end_ since the end_ can be changed by dlmalloc
  // callbacks.
  if (immune_begin_ == NULL) {
    DCHECK(immune_end_ == NULL);
    SetImmuneRange(reinterpret_cast<Object*>(space->Begin()),
                   reinterpret_cast<Object*>(space->End()));
  } else {
    const space::ContinuousSpace* prev_space = nullptr;
    // Find out if the previous space is immune.
    for (const space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
      if (cur_space == space) {
        break;
      }
      prev_space = cur_space;
    }
    // If previous space was immune, then extend the immune region. Relies on continuous spaces
    // being sorted by Heap::AddContinuousSpace.
    if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
    }
  }
}

bool MarkSweep::IsImmuneSpace(const space::ContinuousSpace* space) const {
  return
      immune_begin_ <= reinterpret_cast<Object*>(space->Begin()) &&
      immune_end_ >= reinterpret_cast<Object*>(space->End());
}
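
// Example: with continuous spaces sorted by address, e.g. [image][zygote][alloc], marking the
// image space immune and then the zygote space immune merges them into one contiguous
// [image begin, zygote end) range, so immunity checks stay a single pair of pointer comparisons.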

void MarkSweep::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      ImmuneSpace(space);
    }
  }
  timings_.EndSplit();
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix +
                       (is_concurrent ? "concurrent mark sweep" : "mark sweep")),
      current_mark_bitmap_(NULL),
      mark_stack_(NULL),
      immune_begin_(NULL),
      immune_end_(NULL),
      soft_reference_list_(NULL),
      weak_reference_list_(NULL),
      finalizer_reference_list_(NULL),
      phantom_reference_list_(NULL),
      cleared_reference_list_(NULL),
      gc_barrier_(new Barrier(0)),
      large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
      is_concurrent_(is_concurrent) {
}

void MarkSweep::InitializePhase() {
  timings_.Reset();
  base::TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  SetImmuneRange(nullptr, nullptr);
  soft_reference_list_ = nullptr;
  weak_reference_list_ = nullptr;
  finalizer_reference_list_ = nullptr;
  phantom_reference_list_ = nullptr;
  cleared_reference_list_ = nullptr;
  class_count_ = 0;
  array_count_ = 0;
  other_count_ = 0;
  large_object_test_ = 0;
  large_object_mark_ = 0;
  classes_marked_ = 0;
  overhead_time_ = 0;
  work_chunks_created_ = 0;
  work_chunks_deleted_ = 0;
  reference_count_ = 0;

  FindDefaultMarkBitmap();

  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
}

void MarkSweep::ProcessReferences(Thread* self) {
  base::TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_,
                    &finalizer_reference_list_, &phantom_reference_list_);
}

bool MarkSweep::HandleDirtyObjectsPhase() {
  base::TimingLogger::ScopedSplit split("HandleDirtyObjectsPhase", &timings_);
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Re-mark root set.
    ReMarkRoots();

    // Scan dirty objects; this is only required if we are not doing concurrent GC.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }

  ProcessReferences(self);

  // Only need to do this if we have the card mark verification on, and only during concurrent GC.
  if (GetHeap()->verify_missing_card_marks_ || GetHeap()->verify_pre_gc_heap_ ||
      GetHeap()->verify_post_gc_heap_) {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // This second sweep makes sure that we don't have any objects in the live stack which point to
    // freed objects. These cause problems since their references may be previously freed objects.
    SweepArray(GetHeap()->allocation_stack_.get(), false);
  }

  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  // Ensure that nobody inserted items in the live stack after we swapped the stacks.
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());

  // Disallow new system weaks to prevent a race which occurs when someone adds a new system
  // weak before we sweep them. Since this new system weak may not be marked, the GC may
  // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
  // reference to a string that is about to be swept.
  Runtime::Current()->DisallowNewSystemWeaks();
  return true;
}

bool MarkSweep::IsConcurrent() const {
  return is_concurrent_;
}
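
// For orientation: these phases are driven externally by the GarbageCollector::Run() harness.
// A rough sketch of the concurrent ordering (approximate, not verbatim code):
//   InitializePhase();
//   MarkingPhase();                          // mutators still running
//   while (!HandleDirtyObjectsPhase()) { }   // world paused, re-mark roots and dirty cards
//   ReclaimPhase();                          // mutators resumed
//   FinishPhase();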

void MarkSweep::MarkingPhase() {
  base::TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  Thread* self = Thread::Current();

  BindBitmaps();
  FindDefaultMarkBitmap();

  // Process dirty cards and add dirty cards to mod union tables.
  heap_->ProcessCards(timings_);

  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  heap_->SwapStacks();

  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    MarkRoots();
  } else {
    MarkThreadRoots(self);
    // At this point no mutators should be pushing into the live stack.
    MarkNonThreadRoots();
  }
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
  MarkConcurrentRoots();
  UpdateAndMarkModUnion();
  MarkReachableObjects();
}

void MarkSweep::UpdateAndMarkModUnion() {
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (IsImmuneSpace(space)) {
      const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
          "UpdateAndMarkImageModUnionTable";
      base::TimingLogger::ScopedSplit split(name, &timings_);
      accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
      CHECK(mod_union_table != nullptr);
      mod_union_table->UpdateAndMarkReferences(MarkRootCallback, this);
    }
  }
}

void MarkSweep::MarkThreadRoots(Thread* self) {
  MarkRootsCheckpoint(self);
}

void MarkSweep::MarkReachableObjects() {
  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
  // knowing that new allocations won't be marked as live.
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  base::TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  Thread* self = Thread::Current();

  if (!IsConcurrent()) {
    ProcessReferences(self);
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }

  if (IsConcurrent()) {
    Runtime::Current()->AllowNewSystemWeaks();

    base::TimingLogger::ScopedSplit split("UnMarkAllocStack", &timings_);
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
    // The allocation stack contains things allocated since the start of the GC. These may have
    // been marked during this GC meaning they won't be eligible for reclaiming in the next
    // sticky GC. Remove these objects from the mark bitmaps so that they will be eligible for
    // sticky collection.
    // There is a race here which is safely handled. Another thread such as the hprof could
    // have flushed the alloc stack after we resumed the threads. This is safe however, since
    // resetting the allocation stack zeros it out with madvise. This means that we will either
    // read NULLs or attempt to unmark a newly allocated object which will not be marked in the
    // first place.
    mirror::Object** end = allocation_stack->End();
    for (mirror::Object** it = allocation_stack->Begin(); it != end; ++it) {
      const Object* obj = *it;
      if (obj != NULL) {
        UnMarkObjectNonNull(obj);
      }
    }
  }

  // Before freeing anything, let's verify the heap.
  if (kIsDebugBuild) {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    VerifyImageRoots();
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Reclaim unmarked objects.
    Sweep(false);

    // Swap the live and mark bitmaps for each space which we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();

    // Unbind the live and mark bitmaps.
    UnBindBitmaps();
  }
}

void MarkSweep::SetImmuneRange(Object* begin, Object* end) {
  immune_begin_ = begin;
  immune_end_ = end;
}

void MarkSweep::FindDefaultMarkBitmap() {
  base::TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_mark_bitmap_ = bitmap;
      CHECK(current_mark_bitmap_ != NULL);
      return;
    }
  }
  GetHeap()->DumpSpaces();
  LOG(FATAL) << "Could not find a default mark bitmap";
}

void MarkSweep::ExpandMarkStack() {
  ResizeMarkStack(mark_stack_->Capacity() * 2);
}

void MarkSweep::ResizeMarkStack(size_t new_size) {
  // Rare case, no need to have Thread::Current be a parameter.
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}
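
// Note: the mark stack (accounting::ObjectStack) is backed by its own memory mapping, and
// Resize() re-allocates that backing store rather than growing it in place; that is why
// ResizeMarkStack() snapshots the contents into a temporary vector and re-pushes them afterwards.
// Concurrent expanders race on mark_stack_lock_, hence the early return when another thread has
// already grown the stack.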

inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj) {
  DCHECK(obj != NULL);
  if (MarkObjectParallel(obj)) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(const_cast<Object*>(obj));
  }
}

inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) {
  DCHECK(!IsImmune(obj));
  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, false);
      return;
    }
  }

  DCHECK(object_bitmap->HasAddress(obj));
  object_bitmap->Clear(obj);
}

inline void MarkSweep::MarkObjectNonNull(const Object* obj) {
  DCHECK(obj != NULL);

  if (IsImmune(obj)) {
    DCHECK(IsMarked(obj));
    return;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, true);
      return;
    }
  }

  // This object was not previously marked.
  if (!object_bitmap->Test(obj)) {
    object_bitmap->Set(obj);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      // Lock is not needed but is here anyways to please annotalysis.
      MutexLock mu(Thread::Current(), mark_stack_lock_);
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(const_cast<Object*>(obj));
  }
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool MarkSweep::MarkLargeObject(const Object* obj, bool set) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
  if (kProfileLargeObjects) {
    ++large_object_test_;
  }
  if (UNLIKELY(!large_objects->Test(obj))) {
    if (!large_object_space->Contains(obj)) {
      LOG(ERROR) << "Tried to mark " << obj << " which is not contained by any space";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      VerifyRoots();
      LOG(FATAL) << "Can't mark bad root";
    }
    if (kProfileLargeObjects) {
      ++large_object_mark_;
    }
    if (set) {
      large_objects->Set(obj);
    } else {
      large_objects->Clear(obj);
    }
    return true;
  }
  return false;
}
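
// Large objects live in a discontinuous space that no continuous-space bitmap covers, so marking
// them toggles membership in the large object space's mark set instead. The fatal path above
// doubles as a diagnostic: an address outside every known space can only come from a bad root.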

inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != NULL);

  if (IsImmune(obj)) {
    DCHECK(IsMarked(obj));
    return false;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (new_bitmap != NULL) {
      object_bitmap = new_bitmap;
    } else {
      // TODO: Remove the Thread::Current here?
      // TODO: Convert this to some kind of atomic marking?
      MutexLock mu(Thread::Current(), large_object_lock_);
      return MarkLargeObject(obj, true);
    }
  }

  // Return true if the object was not previously marked.
  return !object_bitmap->AtomicTestAndSet(obj);
}
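
// The parallel path never pushes to the shared mark stack here; AtomicTestAndSet() lets racing
// workers mark the same object safely, and exactly one of them observes "previously unmarked"
// and becomes responsible for queuing it (see MarkObjectNonNullParallel above).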

// Used to mark objects when recursing. Recursion is done by moving
// the finger across the bitmaps in address order and marking child
// objects. Any newly-marked objects whose addresses are lower than
// the finger won't be visited by the bitmap scan, so those objects
// need to be added to the mark stack.
inline void MarkSweep::MarkObject(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj);
  }
}

void MarkSweep::MarkRoot(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj);
  }
}

Object* MarkSweep::MarkRootParallelCallback(Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(root);
  return root;
}

Object* MarkSweep::MarkRootCallback(Object* root, void* arg) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(root);
  return root;
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) {
  // See if the root is on any space bitmap.
  if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == NULL) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root;
      if (visitor != NULL) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

// Marks all objects in the root set.
void MarkSweep::MarkRoots() {
  timings_.StartSplit("MarkRoots");
  Runtime::Current()->VisitNonConcurrentRoots(MarkRootCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkNonThreadRoots() {
  timings_.StartSplit("MarkNonThreadRoots");
  Runtime::Current()->VisitNonThreadRoots(MarkRootCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkConcurrentRoots() {
  timings_.StartSplit("MarkConcurrentRoots");
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, false, true);
  timings_.EndSplit();
}

void MarkSweep::CheckObject(const Object* obj) {
  DCHECK(obj != NULL);
  VisitObjectReferences(const_cast<Object*>(obj), [this](const Object* obj, const Object* ref,
      MemberOffset offset, bool is_static) NO_THREAD_SAFETY_ANALYSIS {
    Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
    CheckReference(obj, ref, offset, is_static);
  }, true);
}

void MarkSweep::VerifyImageRootVisitor(Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  DCHECK(mark_sweep->heap_->GetMarkBitmap()->Test(root));
  mark_sweep->CheckObject(root);
}

void MarkSweep::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
  CHECK(space->IsDlMallocSpace());
  space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = alloc_space->BindLiveToMarkBitmap();
  GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
}
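
// Binding temporarily makes the space's mark bitmap an alias of its live bitmap, so every object
// that survived the previous GC counts as marked without being traced; this is what makes a
// space effectively immune. UnBindBitmaps() in ReclaimPhase() restores the separate bitmaps.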

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
      : mark_sweep_(mark_sweep) {}

  // TODO: Fix this when annotalysis works with visitors.
  void operator()(Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

template <bool kUseFinger = false>
class MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
                const Object** mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != NULL);
      // TODO: Check performance?
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  static const size_t kMaxSize = 1 * KB;

 protected:
  class ScanObjectParallelVisitor {
   public:
    explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
        : chunk_task_(chunk_task) {}

    void operator()(Object* obj) const {
      MarkSweep* mark_sweep = chunk_task_->mark_sweep_;
      mark_sweep->ScanObjectVisit(obj,
          [mark_sweep, this](Object* /* obj */, Object* ref, const MemberOffset& /* offset */,
              bool /* is_static */) ALWAYS_INLINE {
        if (ref != nullptr && mark_sweep->MarkObjectParallel(ref)) {
          if (kUseFinger) {
            android_memory_barrier();
            if (reinterpret_cast<uintptr_t>(ref) >=
                static_cast<uintptr_t>(mark_sweep->atomic_finger_)) {
              return;
            }
          }
          chunk_task_->MarkStackPush(ref);
        }
      });
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread local mark stack for this task.
  const Object* mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

  void MarkStackPush(const Object* obj) ALWAYS_INLINE {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK(mark_stack_pos_ < kMaxSize);
    mark_stack_[mark_stack_pos_++] = obj;
  }

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) {
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<const Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      const Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          const Object* obj = mark_stack_[--mark_stack_pos_];
          DCHECK(obj != nullptr);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_];
      }
      DCHECK(obj != nullptr);
      visitor(const_cast<mirror::Object*>(obj));
    }
  }
};
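
// Each worker task drains a fixed-size thread-local stack and touches shared state only on
// overflow, when MarkStackPush() hands half of the local entries to the thread pool as a new
// MarkStackTask. Together with the prefetch FIFO in Run(), this keeps parallel marking largely
// contention-free.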

class CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, accounting::SpaceBitmap* bitmap,
               byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
               const Object** mark_stack_obj)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age) {
  }

 protected:
  accounting::SpaceBitmap* const bitmap_;
  byte* const begin_;
  byte* const end_;
  const byte minimum_age_;

  virtual void Finalize() {
    delete this;
  }

  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    size_t cards_scanned = card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_);
    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
               << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

size_t MarkSweep::GetThreadCount(bool paused) const {
  if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
    return 0;
  }
  if (paused) {
    return heap_->GetParallelGCThreadCount() + 1;
  } else {
    return heap_->GetConcGCThreadCount() + 1;
  }
}

void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  size_t thread_count = GetThreadCount(paused);
  // The parallel version with only one thread is faster for card scanning. TODO: fix.
  if (kParallelCardScan && thread_count > 0) {
    Thread* self = Thread::Current();
    // Can't have a different split for each space since multiple spaces can have their cards being
    // scanned at the same time.
    timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects");
    // Try to take some of the mark stack since we can pass this off to the worker tasks.
    const Object** mark_stack_begin = const_cast<const Object**>(mark_stack_->Begin());
    const Object** mark_stack_end = const_cast<const Object**>(mark_stack_->End());
    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
    // Estimated number of work tasks we will create.
    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
    DCHECK_NE(mark_stack_tasks, 0U);
    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                             mark_stack_size / mark_stack_tasks + 1);
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() == nullptr) {
        continue;
      }
      byte* card_begin = space->Begin();
      byte* card_end = space->End();
      // Align up the end address. For example, the image space's end
      // may not be card-size-aligned.
      card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin));
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end));
      // Calculate how many bytes of heap we will scan.
      const size_t address_range = card_end - card_begin;
      // Calculate how much address range each task gets.
      const size_t card_delta = RoundUp(address_range / thread_count + 1,
                                        accounting::CardTable::kCardSize);
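      // Worked example (assuming the usual 128-byte card size): a 64 MiB space scanned with
      // thread_count == 4 yields card_delta = RoundUp(16 MiB + 1, 128) ~ 16 MiB, so the loop
      // below creates four CardScanTasks for this space, one per worker.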
      // Create the worker tasks for this space.
      while (card_begin != card_end) {
        // Add a range of cards.
        size_t addr_remaining = card_end - card_begin;
        size_t card_increment = std::min(card_delta, addr_remaining);
        // Take from the back of the mark stack.
        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
        mark_stack_end -= mark_stack_increment;
        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
        DCHECK_EQ(mark_stack_end, mark_stack_->End());
        // Add the new task to the thread pool.
        auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
                                      card_begin + card_increment, minimum_age,
                                      mark_stack_increment, mark_stack_end);
        thread_pool->AddTask(self, task);
        card_begin += card_increment;
      }
    }

    // Note: the card scan below may dirty new cards (and scan them)
    // as a side effect when a Reference object is encountered and
    // queued during the marking. See b/11465268.
    thread_pool->SetMaxActiveWorkers(thread_count - 1);
    thread_pool->StartWorkers(self);
    thread_pool->Wait(self, true, true);
    thread_pool->StopWorkers(self);
    timings_.EndSplit();
  } else {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() != nullptr) {
        // Image spaces are handled properly since live == marked for them.
        switch (space->GetGcRetentionPolicy()) {
          case space::kGcRetentionPolicyNeverCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
                "ScanGrayImageSpaceObjects");
            break;
          case space::kGcRetentionPolicyFullCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
                "ScanGrayZygoteSpaceObjects");
            break;
          case space::kGcRetentionPolicyAlwaysCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
                "ScanGrayAllocSpaceObjects");
            break;
        }
        ScanObjectVisitor visitor(this);
        card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor,
                         minimum_age);
        timings_.EndSplit();
      }
    }
  }
}

void MarkSweep::VerifyImageRoots() {
  // Verify roots ensures that all the references inside the image space point to
  // objects which are either in the image space or marked objects in the alloc
  // space.
  timings_.StartSplit("VerifyImageRoots");
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsImageSpace()) {
      space::ImageSpace* image_space = space->AsImageSpace();
      uintptr_t begin = reinterpret_cast<uintptr_t>(image_space->Begin());
      uintptr_t end = reinterpret_cast<uintptr_t>(image_space->End());
      accounting::SpaceBitmap* live_bitmap = image_space->GetLiveBitmap();
      DCHECK(live_bitmap != NULL);
      live_bitmap->VisitMarkedRange(begin, end, [this](const Object* obj) {
        if (kCheckLocks) {
          Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
        }
        DCHECK(obj != NULL);
        CheckObject(obj);
      });
    }
  }
  timings_.EndSplit();
}

class RecursiveMarkTask : public MarkStackTask<false> {
 public:
  RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
                    accounting::SpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
      : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL),
        bitmap_(bitmap),
        begin_(begin),
        end_(end) {
  }

 protected:
  accounting::SpaceBitmap* const bitmap_;
  const uintptr_t begin_;
  const uintptr_t end_;

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    bitmap_->VisitMarkedRange(begin_, end_, visitor);
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  base::TimingLogger::ScopedSplit split("RecursiveMark", &timings_);
  // RecursiveMark will build the lists of known instances of the Reference classes.
  // See DelayReferenceReferent for details.
  CHECK(soft_reference_list_ == NULL);
  CHECK(weak_reference_list_ == NULL);
  CHECK(finalizer_reference_list_ == NULL);
  CHECK(phantom_reference_list_ == NULL);
  CHECK(cleared_reference_list_ == NULL);

  if (kUseRecursiveMark) {
    const bool partial = GetGcType() == kGcTypePartial;
    ScanObjectVisitor scan_visitor(this);
    auto* self = Thread::Current();
    ThreadPool* thread_pool = heap_->GetThreadPool();
    size_t thread_count = GetThreadCount(false);
    const bool parallel = kParallelRecursiveMark && thread_count > 1;
    mark_stack_->Reset();
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_mark_bitmap_ = space->GetMarkBitmap();
        if (current_mark_bitmap_ == nullptr) {
          continue;
        }
        if (parallel) {
          // We will use the mark stack in the future.
          // CHECK(mark_stack_->IsEmpty());
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          atomic_finger_ = static_cast<int32_t>(0xFFFFFFFF);

          // Create a few worker tasks.
          const size_t n = thread_count * 2;
          while (begin != end) {
            uintptr_t start = begin;
            uintptr_t delta = (end - begin) / n;
            delta = RoundUp(delta, KB);
            if (delta < 16 * KB) delta = end - begin;
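            // Slice the space into roughly two tasks per thread; if a slice would be under
            // 16 KB, the whole remaining range becomes a single task, since tiny chunks are not
            // worth the task overhead.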
973 begin += delta;
974 auto* task = new RecursiveMarkTask(thread_pool, this, current_mark_bitmap_, start,
975 begin);
976 thread_pool->AddTask(self, task);
977 }
Mathieu Chartier2775ee42013-08-20 17:43:47 -0700978 thread_pool->SetMaxActiveWorkers(thread_count - 1);
Mathieu Chartier94c32c52013-08-09 11:14:04 -0700979 thread_pool->StartWorkers(self);
Mathieu Chartier2775ee42013-08-20 17:43:47 -0700980 thread_pool->Wait(self, true, true);
Mathieu Chartier94c32c52013-08-09 11:14:04 -0700981 thread_pool->StopWorkers(self);
982 } else {
983 // This function does not handle heap end increasing, so we must use the space end.
984 uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
985 uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
986 current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
987 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -0700988 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -0700989 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -0700990 }
Mathieu Chartier94c32c52013-08-09 11:14:04 -0700991 ProcessMarkStack(false);
Mathieu Chartier357e9be2012-08-01 11:00:14 -0700992}
993
Mathieu Chartier6aa3df92013-09-17 15:17:28 -0700994mirror::Object* MarkSweep::SystemWeakIsMarkedCallback(Object* object, void* arg) {
Mathieu Chartier5712d5d2013-09-18 17:59:36 -0700995 if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
Mathieu Chartier6aa3df92013-09-17 15:17:28 -0700996 return object;
997 }
998 return nullptr;
Mathieu Chartier357e9be2012-08-01 11:00:14 -0700999}
1000
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001001void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
1002 ScanGrayObjects(paused, minimum_age);
1003 ProcessMarkStack(paused);
Mathieu Chartier262e5ff2012-06-01 17:35:38 -07001004}
1005
Carl Shapiro58551df2011-07-24 03:09:51 -07001006void MarkSweep::ReMarkRoots() {
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001007 timings_.StartSplit("ReMarkRoots");
Mathieu Chartier423d2a32013-09-12 17:33:56 -07001008 Runtime::Current()->VisitRoots(MarkRootCallback, this, true, true);
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001009 timings_.EndSplit();
Carl Shapiro69759ea2011-07-21 18:13:35 -07001010}
1011
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001012void MarkSweep::SweepSystemWeaks() {
1013 Runtime* runtime = Runtime::Current();
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001014 timings_.StartSplit("SweepSystemWeaks");
Mathieu Chartier6aa3df92013-09-17 15:17:28 -07001015 runtime->SweepSystemWeaks(SystemWeakIsMarkedCallback, this);
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001016 timings_.EndSplit();
Carl Shapiro58551df2011-07-24 03:09:51 -07001017}
1018
Mathieu Chartier6aa3df92013-09-17 15:17:28 -07001019mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001020 reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
1021 // We don't actually want to sweep the object, so lets return "marked"
Mathieu Chartier6aa3df92013-09-17 15:17:28 -07001022 return obj;
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001023}
1024
1025void MarkSweep::VerifyIsLive(const Object* obj) {
1026 Heap* heap = GetHeap();
1027 if (!heap->GetLiveBitmap()->Test(obj)) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001028 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001029 if (!large_object_space->GetLiveObjects()->Test(obj)) {
1030 if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
1031 heap->allocation_stack_->End()) {
1032 // Object not found!
1033 heap->DumpSpaces();
1034 LOG(FATAL) << "Found dead object " << obj;
1035 }
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001036 }
1037 }
1038}
1039
1040void MarkSweep::VerifySystemWeaks() {
Mathieu Chartier6aa3df92013-09-17 15:17:28 -07001041 // Verify system weaks, uses a special object visitor which returns the input object.
1042 Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001043}
1044
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001045struct SweepCallbackContext {
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001046 MarkSweep* mark_sweep;
Ian Rogers1d54e732013-05-02 21:10:01 -07001047 space::AllocSpace* space;
Ian Rogers50b35e22012-10-04 10:09:15 -07001048 Thread* self;
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001049};
1050
class CheckpointMarkThreadRoots : public Closure {
 public:
  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
    ATRACE_BEGIN("Marking thread roots");
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    ATRACE_END();
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* mark_sweep_;
};

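// Requests a checkpoint on all mutator threads so that each marks its own roots, then
// releases the GC locks and blocks on the barrier until every thread has run it.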
void MarkSweep::MarkRootsCheckpoint(Thread* self) {
  CheckpointMarkThreadRoots check_point(this);
  timings_.StartSplit("MarkRootsCheckpoint");
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the checkpoint be run on all threads; this returns a count of the threads
  // that must run through the barrier, including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // TODO: optimize to not release locks when there are no threads to wait for.
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun);
  CHECK_EQ(old_state, kWaitingPerformingGc);
  gc_barrier_->Increment(self, barrier_count);
  self->SetState(kWaitingPerformingGc);
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
  timings_.EndSplit();
}

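// Bulk-free callback invoked by SpaceBitmap::SweepWalk with runs of live-but-unmarked
// objects: frees them back to the alloc space and records the freed counts.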
void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  MarkSweep* mark_sweep = context->mark_sweep;
  Heap* heap = mark_sweep->GetHeap();
  space::AllocSpace* space = context->space;
  Thread* self = context->self;
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
  // Use a bulk free that merges consecutive objects before freeing, or free per object?
  // Documentation suggests better free performance with merging, but this may be at the
  // expense of allocation.
  size_t freed_objects = num_ptrs;
  // AllocSpace::FreeList clears the value in ptrs, so perform after clearing the live bit.
  size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs);
  heap->RecordFree(freed_objects, freed_bytes);
  mark_sweep->freed_objects_.fetch_add(freed_objects);
  mark_sweep->freed_bytes_.fetch_add(freed_bytes);
}

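// Variant of SweepCallback used for the zygote space, where unmarked objects are cleared
// from the live bitmap and their cards dirtied instead of being freed.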
void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
  Heap* heap = context->mark_sweep->GetHeap();
  // We don't free any actual memory to avoid dirtying the shared zygote pages.
  for (size_t i = 0; i < num_ptrs; ++i) {
    Object* obj = static_cast<Object*>(ptrs[i]);
    heap->GetLiveBitmap()->Clear(obj);
    heap->GetCardTable()->MarkCard(obj);
  }
}

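// Sweeps the objects recorded in the allocation stack: anything allocated since the last
// collection that did not get marked is freed, in chunks of at most
// kSweepArrayChunkFreeSize objects to amortize the cost of FreeList.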
void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  space::DlMallocSpace* space = heap_->GetNonMovingSpace();
  timings_.StartSplit("SweepArray");
  // Newly allocated objects MUST be in the alloc space and those are the only objects which we are
  // going to free.
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(live_bitmap, mark_bitmap);
    std::swap(large_live_objects, large_mark_objects);
  }

  size_t freed_bytes = 0;
  size_t freed_large_object_bytes = 0;
  size_t freed_objects = 0;
  size_t freed_large_objects = 0;
  size_t count = allocations->Size();
  Object** objects = const_cast<Object**>(allocations->Begin());
  Object** out = objects;
  Object** objects_to_chunk_free = out;

  // Empty the allocation stack.
  Thread* self = Thread::Current();
  for (size_t i = 0; i < count; ++i) {
    Object* obj = objects[i];
    // There should only be objects in the AllocSpace/LargeObjectSpace in the allocation stack.
    if (LIKELY(mark_bitmap->HasAddress(obj))) {
      if (!mark_bitmap->Test(obj)) {
        // Don't bother un-marking since we clear the mark bitmap anyway.
        *(out++) = obj;
        // Free objects in chunks.
        DCHECK_GE(out, objects_to_chunk_free);
        DCHECK_LE(static_cast<size_t>(out - objects_to_chunk_free), kSweepArrayChunkFreeSize);
        if (static_cast<size_t>(out - objects_to_chunk_free) == kSweepArrayChunkFreeSize) {
          timings_.StartSplit("FreeList");
          size_t chunk_freed_objects = out - objects_to_chunk_free;
          freed_objects += chunk_freed_objects;
          freed_bytes += space->FreeList(self, chunk_freed_objects, objects_to_chunk_free);
          objects_to_chunk_free = out;
          timings_.EndSplit();
        }
      }
    } else if (!large_mark_objects->Test(obj)) {
      ++freed_large_objects;
      freed_large_object_bytes += large_object_space->Free(self, obj);
    }
  }
  // Free the remaining objects in chunks.
  DCHECK_GE(out, objects_to_chunk_free);
  DCHECK_LE(static_cast<size_t>(out - objects_to_chunk_free), kSweepArrayChunkFreeSize);
  if (out - objects_to_chunk_free > 0) {
    timings_.StartSplit("FreeList");
    size_t chunk_freed_objects = out - objects_to_chunk_free;
    freed_objects += chunk_freed_objects;
    freed_bytes += space->FreeList(self, chunk_freed_objects, objects_to_chunk_free);
    timings_.EndSplit();
  }
  CHECK_EQ(count, allocations->Size());
  timings_.EndSplit();

  timings_.StartSplit("RecordFree");
  VLOG(heap) << "Freed " << freed_objects << "/" << count
             << " objects with size " << PrettySize(freed_bytes);
  heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes + freed_large_object_bytes);
  freed_objects_.fetch_add(freed_objects);
  freed_large_objects_.fetch_add(freed_large_objects);
  freed_bytes_.fetch_add(freed_bytes);
  freed_large_object_bytes_.fetch_add(freed_large_object_bytes);
  timings_.EndSplit();

  timings_.StartSplit("ResetStack");
  allocations->Reset();
  timings_.EndSplit();
}

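// Sweeps every continuous malloc space appropriate for the current GC type, using the
// pre-swapped bitmaps so that unmarked objects can be freed with the heap unlocked.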
void MarkSweep::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  base::TimingLogger::ScopedSplit split("Sweep", &timings_);

  const bool partial = (GetGcType() == kGcTypePartial);
  SweepCallbackContext scc;
  scc.mark_sweep = this;
  scc.self = Thread::Current();
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (!space->IsDlMallocSpace()) {
      continue;
    }
    // We always sweep always-collect spaces.
    bool sweep_space = space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect;
    if (!partial && !sweep_space) {
      // We sweep full-collect spaces when the GC isn't partial (i.e. it is a full GC).
      sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
    }
    if (sweep_space) {
      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
      scc.space = space->AsDlMallocSpace();
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
      if (swap_bitmaps) {
        std::swap(live_bitmap, mark_bitmap);
      }
      if (!space->IsZygoteSpace()) {
        base::TimingLogger::ScopedSplit split("SweepAllocSpace", &timings_);
        // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                                           &SweepCallback, reinterpret_cast<void*>(&scc));
      } else {
        base::TimingLogger::ScopedSplit split("SweepZygote", &timings_);
        // Zygote sweep takes care of dirtying cards and clearing live bits, does not free actual
        // memory.
        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                                           &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
      }
    }
  }

  SweepLargeObjects(swap_bitmaps);
}

void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  base::TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  // Sweep large objects.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  // O(n*log(n)) but hopefully there are not too many large objects.
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  Thread* self = Thread::Current();
  for (const Object* obj : large_live_objects->GetObjects()) {
    if (!large_mark_objects->Test(obj)) {
      freed_bytes += large_object_space->Free(self, const_cast<Object*>(obj));
      ++freed_objects;
    }
  }
  freed_large_objects_.fetch_add(freed_objects);
  freed_large_object_bytes_.fetch_add(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

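// Debugging aid: given a reference from obj into the alloc space, verifies that the
// referent was marked and, if not, logs the offending field and whether the holder's card
// was dirty.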
void MarkSweep::CheckReference(const Object* obj, const Object* ref, MemberOffset offset,
                               bool is_static) {
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsDlMallocSpace() && space->Contains(ref)) {
      DCHECK(IsMarked(obj));

      bool is_marked = IsMarked(ref);
      if (!is_marked) {
        LOG(INFO) << *space;
        LOG(WARNING) << (is_static ? "Static ref'" : "Instance ref'") << PrettyTypeOf(ref)
                     << "' (" << reinterpret_cast<const void*>(ref) << ") in '" << PrettyTypeOf(obj)
                     << "' (" << reinterpret_cast<const void*>(obj) << ") at offset "
                     << reinterpret_cast<void*>(offset.Int32Value()) << " wasn't marked";

        const Class* klass = is_static ? obj->AsClass() : obj->GetClass();
        DCHECK(klass != NULL);
        const ObjectArray<ArtField>* fields = is_static ? klass->GetSFields() : klass->GetIFields();
        DCHECK(fields != NULL);
        bool found = false;
        for (int32_t i = 0; i < fields->GetLength(); ++i) {
          const ArtField* cur = fields->Get(i);
          if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
            LOG(WARNING) << "Field referencing the alloc space was " << PrettyField(cur);
            found = true;
            break;
          }
        }
        if (!found) {
          LOG(WARNING) << "Could not find field in object alloc space with offset " << offset.Int32Value();
        }

        bool obj_marked = heap_->GetCardTable()->IsDirty(obj);
        if (!obj_marked) {
          LOG(WARNING) << "Object '" << PrettyTypeOf(obj) << "' "
                       << "(" << reinterpret_cast<const void*>(obj) << ") contains references to "
                       << "the alloc space, but wasn't card marked";
        }
      }
      // Found the space containing ref, no need to check the rest.
      break;
    }
  }
}

// Process the "referent" field in a java.lang.ref.Reference.  If the
// referent has not yet been marked, put it on the appropriate list in
// the heap for later processing.
void MarkSweep::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
  DCHECK(klass != nullptr);
  DCHECK(klass->IsReferenceClass());
  DCHECK(obj != NULL);
  Object* referent = heap_->GetReferenceReferent(obj);
  if (referent != NULL && !IsMarked(referent)) {
    if (kCountJavaLangRefs) {
      ++reference_count_;
    }
    Thread* self = Thread::Current();
    // TODO: Remove these locks, and use atomic stacks for storing references?
    // We need to check that the references haven't already been enqueued since we can end up
    // scanning the same reference multiple times due to dirty cards.
    if (klass->IsSoftReferenceClass()) {
      MutexLock mu(self, *heap_->GetSoftRefQueueLock());
      if (!heap_->IsEnqueued(obj)) {
        heap_->EnqueuePendingReference(obj, &soft_reference_list_);
      }
    } else if (klass->IsWeakReferenceClass()) {
      MutexLock mu(self, *heap_->GetWeakRefQueueLock());
      if (!heap_->IsEnqueued(obj)) {
        heap_->EnqueuePendingReference(obj, &weak_reference_list_);
      }
    } else if (klass->IsFinalizerReferenceClass()) {
      MutexLock mu(self, *heap_->GetFinalizerRefQueueLock());
      if (!heap_->IsEnqueued(obj)) {
        heap_->EnqueuePendingReference(obj, &finalizer_reference_list_);
      }
    } else if (klass->IsPhantomReferenceClass()) {
      MutexLock mu(self, *heap_->GetPhantomRefQueueLock());
      if (!heap_->IsEnqueued(obj)) {
        heap_->EnqueuePendingReference(obj, &phantom_reference_list_);
      }
    } else {
      LOG(FATAL) << "Invalid reference type " << PrettyClass(klass)
                 << " " << std::hex << klass->GetAccessFlags();
    }
  }
}

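// Visitor used when scanning an object's fields: each reference the object holds is handed
// to MarkSweep::MarkObject.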
class MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {}

  // TODO: Fix this when annotalysis works with visitors.
  void operator()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */,
                  bool /* is_static */) const ALWAYS_INLINE
      NO_THREAD_SAFETY_ANALYSIS {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(ref);
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans an object reference.  Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(Object* obj) {
  MarkObjectVisitor visitor(this);
  ScanObjectVisit(obj, visitor);
}

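// Drains the mark stack in parallel by splitting it into chunks and handing each chunk to a
// MarkStackTask run on the heap's thread pool.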
void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
                                     static_cast<size_t>(MarkStackTask<false>::kMaxSize));
  CHECK_GT(chunk_size, 0U);
  // Split the current mark stack up into work tasks.
  for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) {
    const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
    thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta,
                                                        const_cast<const mirror::Object**>(it)));
    it += delta;
  }
  thread_pool->SetMaxActiveWorkers(thread_count - 1);
  thread_pool->StartWorkers(self);
  thread_pool->Wait(self, true, true);
  thread_pool->StopWorkers(self);
  mark_stack_->Reset();
  CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
}

// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack(bool paused) {
  timings_.StartSplit("ProcessMarkStack");
  size_t thread_count = GetThreadCount(paused);
  if (kParallelProcessMarkStack && thread_count > 1 &&
      mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
    ProcessMarkStackParallel(thread_count);
  } else {
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      Object* obj = NULL;
      if (kUseMarkStackPrefetch) {
        while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
          Object* mark_stack_obj = mark_stack_->PopBack();
          DCHECK(mark_stack_obj != NULL);
          __builtin_prefetch(mark_stack_obj);
          prefetch_fifo.push_back(mark_stack_obj);
        }
        if (prefetch_fifo.empty()) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (mark_stack_->IsEmpty()) {
          break;
        }
        obj = mark_stack_->PopBack();
      }
      DCHECK(obj != NULL);
      ScanObject(obj);
    }
  }
  timings_.EndSplit();
}

// Walks the reference list marking any references subject to the
// reference clearing policy.  References with a black referent are
// removed from the list.  References with white referents biased
// toward saving are blackened and also removed from the list.
void MarkSweep::PreserveSomeSoftReferences(Object** list) {
  DCHECK(list != NULL);
  Object* clear = NULL;
  size_t counter = 0;

  DCHECK(mark_stack_->IsEmpty());

  timings_.StartSplit("PreserveSomeSoftReferences");
  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent == NULL) {
      // Referent was cleared by the user during marking.
      continue;
    }
    bool is_marked = IsMarked(referent);
    if (!is_marked && ((++counter) & 1)) {
      // Referent is white and biased toward saving, mark it.
      MarkObject(referent);
      is_marked = true;
    }
    if (!is_marked) {
      // Referent is white, queue it for clearing.
      heap_->EnqueuePendingReference(ref, &clear);
    }
  }
  *list = clear;
  timings_.EndSplit();

  // Restart the mark with the newly black references added to the root set.
  ProcessMarkStack(true);
}

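// Returns true if the object is marked: immune objects and objects covered by the current
// mark bitmap are checked first, falling back to the heap-wide mark bitmap otherwise.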
inline bool MarkSweep::IsMarked(const Object* object) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  if (IsImmune(object)) {
    return true;
  }
  DCHECK(current_mark_bitmap_ != NULL);
  if (current_mark_bitmap_->HasAddress(object)) {
    return current_mark_bitmap_->Test(object);
  }
  return heap_->GetMarkBitmap()->Test(object);
}

// Unlink the reference list, clearing reference objects with white
// referents.  Cleared references registered to a reference queue are
// scheduled for appending by the heap worker thread.
void MarkSweep::ClearWhiteReferences(Object** list) {
  DCHECK(list != NULL);
  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent != NULL && !IsMarked(referent)) {
      // Referent is white, clear it.
      heap_->ClearReferenceReferent(ref);
      if (heap_->IsEnqueuable(ref)) {
        heap_->EnqueueReference(ref, &cleared_reference_list_);
      }
    }
  }
  DCHECK(*list == NULL);
}

// Enqueues finalizer references with white referents.  White
// referents are blackened, moved to the zombie field, and the
// referent field is cleared.
void MarkSweep::EnqueueFinalizerReferences(Object** list) {
  DCHECK(list != NULL);
  timings_.StartSplit("EnqueueFinalizerReferences");
  MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset();
  bool has_enqueued = false;
  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent != NULL && !IsMarked(referent)) {
      MarkObject(referent);
      // If the referent is non-null the reference must be enqueuable.
      DCHECK(heap_->IsEnqueuable(ref));
      ref->SetFieldObject(zombie_offset, referent, false);
      heap_->ClearReferenceReferent(ref);
      heap_->EnqueueReference(ref, &cleared_reference_list_);
      has_enqueued = true;
    }
  }
  timings_.EndSplit();
  if (has_enqueued) {
    ProcessMarkStack(true);
  }
  DCHECK(*list == NULL);
}

// Process reference class instances and schedule finalizations.
void MarkSweep::ProcessReferences(Object** soft_references, bool clear_soft,
                                  Object** weak_references,
                                  Object** finalizer_references,
                                  Object** phantom_references) {
  CHECK(soft_references != NULL);
  CHECK(weak_references != NULL);
  CHECK(finalizer_references != NULL);
  CHECK(phantom_references != NULL);
  CHECK(mark_stack_->IsEmpty());

  // Unless we are in the zygote or required to clear soft references
  // with white referents, preserve some white referents.
  if (!clear_soft && !Runtime::Current()->IsZygote()) {
    PreserveSomeSoftReferences(soft_references);
  }

  timings_.StartSplit("ProcessReferences");
  // Clear all remaining soft and weak references with white
  // referents.
  ClearWhiteReferences(soft_references);
  ClearWhiteReferences(weak_references);
  timings_.EndSplit();

  // Preserve all white objects with finalize methods and schedule
  // them for finalization.
  EnqueueFinalizerReferences(finalizer_references);

  timings_.StartSplit("ProcessReferences");
  // Clear all f-reachable soft and weak references with white
  // referents.
  ClearWhiteReferences(soft_references);
  ClearWhiteReferences(weak_references);

  // Clear all phantom references with white referents.
  ClearWhiteReferences(phantom_references);

  // At this point all reference lists should be empty.
  DCHECK(*soft_references == NULL);
  DCHECK(*weak_references == NULL);
  DCHECK(*finalizer_references == NULL);
  DCHECK(*phantom_references == NULL);
  timings_.EndSplit();
}

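// Restores each alloc space's mark bitmap after a collection that temporarily bound it to
// the live bitmap; the original bitmap was stashed in temp_bitmap_ while bound.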
void MarkSweep::UnBindBitmaps() {
  base::TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsDlMallocSpace()) {
      space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
      if (alloc_space->temp_bitmap_.get() != NULL) {
        // At this point, the temp_bitmap holds our old mark bitmap.
        accounting::SpaceBitmap* new_bitmap = alloc_space->temp_bitmap_.release();
        GetHeap()->GetMarkBitmap()->ReplaceBitmap(alloc_space->mark_bitmap_.get(), new_bitmap);
        CHECK_EQ(alloc_space->mark_bitmap_.release(), alloc_space->live_bitmap_.get());
        alloc_space->mark_bitmap_.reset(new_bitmap);
        DCHECK(alloc_space->temp_bitmap_.get() == NULL);
      }
    }
  }
}

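// Final phase of the collection: enqueues cleared references, runs post-GC verification,
// requests a heap trim, folds this GC's statistics into the cumulative totals, and clears
// the mark bitmaps and mark stack for the next collection.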
void MarkSweep::FinishPhase() {
  base::TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  // Can't enqueue references if we hold the mutator lock.
  Object* cleared_references = GetClearedReferences();
  Heap* heap = GetHeap();
  timings_.NewSplit("EnqueueClearedReferences");
  heap->EnqueueClearedReferences(&cleared_references);

  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);

  timings_.NewSplit("RequestHeapTrim");
  heap->RequestHeapTrim();

  // Update the cumulative statistics.
  total_time_ns_ += GetDurationNs();
  total_paused_time_ns_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(), 0,
                                           std::plus<uint64_t>());
  total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
  total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  if (kCountScannedTypes) {
    VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
             << " other=" << other_count_;
  }

  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
  }

  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
  }

  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
  }

  if (kCountClassesMarked) {
    VLOG(gc) << "Classes marked " << classes_marked_;
  }

  if (kCountJavaLangRefs) {
    VLOG(gc) << "References scanned " << reference_count_;
  }

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      bitmap->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();
}

}  // namespace collector
}  // namespace gc
}  // namespace art