/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "monitor.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::ArtField;
using ::art::mirror::Class;
using ::art::mirror::Object;
using ::art::mirror::ObjectArray;

namespace art {
namespace gc {
namespace collector {

// Performance options.
constexpr bool kUseRecursiveMark = false;
constexpr bool kUseMarkStackPrefetch = true;
constexpr size_t kSweepArrayChunkFreeSize = 1024;

// Parallelism options.
constexpr bool kParallelCardScan = true;
constexpr bool kParallelRecursiveMark = true;
// Don't attempt to parallelize mark stack processing unless the mark stack is at least n
// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc. Not
// having this can add overhead in ProcessReferences since we may end up doing many calls of
// ProcessMarkStack with very small mark stacks.
constexpr size_t kMinimumParallelMarkStackSize = 128;
constexpr bool kParallelProcessMarkStack = true;

// Profiling and information flags.
constexpr bool kCountClassesMarked = false;
constexpr bool kProfileLargeObjects = false;
constexpr bool kMeasureOverhead = false;
constexpr bool kCountTasks = false;
constexpr bool kCountJavaLangRefs = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
constexpr bool kCheckLocks = kDebugLocking;

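// The immune region is a single contiguous address range [immune_begin_, immune_end_) covering
// the spaces this collector never reclaims (the image and zygote spaces). Objects inside it are
// assumed marked, so the marking code can return early for them without any bitmap lookup.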
void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    CHECK(space->IsContinuousMemMapAllocSpace());
    space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
  }

  // Add the space to the immune region.
  // TODO: Use space limits instead of current end_ since the end_ can be changed by dlmalloc
  // callbacks.
  if (immune_begin_ == NULL) {
    DCHECK(immune_end_ == NULL);
    SetImmuneRange(reinterpret_cast<Object*>(space->Begin()),
                   reinterpret_cast<Object*>(space->End()));
  } else {
    const space::ContinuousSpace* prev_space = nullptr;
    // Find out if the previous space is immune.
    for (const space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
      if (cur_space == space) {
        break;
      }
      prev_space = cur_space;
    }
    // If previous space was immune, then extend the immune region. Relies on continuous spaces
    // being sorted by Heap::AddContinuousSpace.
    if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
    }
  }
}

bool MarkSweep::IsImmuneSpace(const space::ContinuousSpace* space) const {
  return
      immune_begin_ <= reinterpret_cast<Object*>(space->Begin()) &&
      immune_end_ >= reinterpret_cast<Object*>(space->End());
}

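// Binding makes a never-collected space's live bitmap double as its mark bitmap, so everything
// live in such a space is trivially considered marked; the binding is undone by UnBindBitmaps
// in ReclaimPhase.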
void MarkSweep::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      ImmuneSpace(space);
    }
  }
  timings_.EndSplit();
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix +
                       (is_concurrent ? "concurrent mark sweep" : "mark sweep")),
      current_mark_bitmap_(NULL),
      mark_stack_(NULL),
      immune_begin_(NULL),
      immune_end_(NULL),
      live_stack_freeze_size_(0),
      gc_barrier_(new Barrier(0)),
      large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
      is_concurrent_(is_concurrent) {
}

void MarkSweep::InitializePhase() {
  timings_.Reset();
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  SetImmuneRange(nullptr, nullptr);
  class_count_ = 0;
  array_count_ = 0;
  other_count_ = 0;
  large_object_test_ = 0;
  large_object_mark_ = 0;
  classes_marked_ = 0;
  overhead_time_ = 0;
  work_chunks_created_ = 0;
  work_chunks_deleted_ = 0;
  reference_count_ = 0;

  FindDefaultMarkBitmap();

  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
}

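// Reference processing is delegated to the heap, parameterized by the callbacks below so that
// java.lang.ref reference types reuse this collector's IsMarked and recursive-mark logic.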
void MarkSweep::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &IsMarkedCallback,
                               &RecursiveMarkObjectCallback, this);
}

bool MarkSweep::HandleDirtyObjectsPhase() {
  TimingLogger::ScopedSplit split("HandleDirtyObjectsPhase", &timings_);
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Re-mark root set.
    ReMarkRoots();

    // Scan dirty objects; this is only required if we are not doing concurrent GC.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }

  ProcessReferences(self);

  // Only need to do this if we have the card mark verification on, and only during concurrent GC.
  if (GetHeap()->verify_missing_card_marks_ || GetHeap()->verify_pre_gc_heap_ ||
      GetHeap()->verify_post_gc_heap_) {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // This second sweep makes sure that we don't have any objects in the live stack which point to
    // freed objects. These cause problems since their references may be previously freed objects.
    SweepArray(GetHeap()->allocation_stack_.get(), false);
  }

  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  // Ensure that nobody inserted items in the live stack after we swapped the stacks.
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());

  // Disallow new system weaks to prevent a race which occurs when someone adds a new system
  // weak before we sweep them. Since this new system weak may not be marked, the GC may
  // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
  // reference to a string that is about to be swept.
  Runtime::Current()->DisallowNewSystemWeaks();
  return true;
}

bool MarkSweep::IsConcurrent() const {
  return is_concurrent_;
}

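// A collection runs InitializePhase and MarkingPhase first; concurrent collections then run
// HandleDirtyObjectsPhase with mutators suspended to re-mark roots and dirty cards, and finally
// ReclaimPhase sweeps. Non-concurrent collections process references in ReclaimPhase instead.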
void MarkSweep::MarkingPhase() {
  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  Thread* self = Thread::Current();

  BindBitmaps();
  FindDefaultMarkBitmap();

  // Process dirty cards and add dirty cards to mod union tables.
  heap_->ProcessCards(timings_);

  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  heap_->SwapStacks();

  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    MarkRoots();
  } else {
    MarkThreadRoots(self);
    // At this point the live stack should no longer have any mutators which push into it.
    MarkNonThreadRoots();
  }
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
  MarkConcurrentRoots();
  UpdateAndMarkModUnion();
  MarkReachableObjects();
}

void MarkSweep::UpdateAndMarkModUnion() {
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (IsImmuneSpace(space)) {
      const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
          "UpdateAndMarkImageModUnionTable";
      TimingLogger::ScopedSplit split(name, &timings_);
      accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
      CHECK(mod_union_table != nullptr);
      mod_union_table->UpdateAndMarkReferences(MarkRootCallback, this);
    }
  }
}

void MarkSweep::MarkThreadRoots(Thread* self) {
  MarkRootsCheckpoint(self);
}

void MarkSweep::MarkReachableObjects() {
  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
  // knowing that new allocations won't be marked as live.
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  Thread* self = Thread::Current();

  if (!IsConcurrent()) {
    ProcessReferences(self);
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }

  if (IsConcurrent()) {
    Runtime::Current()->AllowNewSystemWeaks();

    TimingLogger::ScopedSplit split("UnMarkAllocStack", &timings_);
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
    // The allocation stack contains things allocated since the start of the GC. These may have
    // been marked during this GC meaning they won't be eligible for reclaiming in the next
    // sticky GC. Remove these objects from the mark bitmaps so that they will be eligible for
    // sticky collection.
    // There is a race here which is safely handled. Another thread such as the hprof could
    // have flushed the alloc stack after we resumed the threads. This is safe however, since
    // resetting the allocation stack zeros it out with madvise. This means that we will either
    // read NULLs or attempt to unmark a newly allocated object which will not be marked in the
    // first place.
    mirror::Object** end = allocation_stack->End();
    for (mirror::Object** it = allocation_stack->Begin(); it != end; ++it) {
      const Object* obj = *it;
      if (obj != NULL) {
        UnMarkObjectNonNull(obj);
      }
    }
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Reclaim unmarked objects.
    Sweep(false);

    // Swap the live and mark bitmaps for each space which we modified. This is an optimization
    // that enables us to not clear live bits inside of the sweep. Only swaps unbound bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();

    // Unbind the live and mark bitmaps.
    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
    GetHeap()->UnBindBitmaps();
  }
}

void MarkSweep::SetImmuneRange(Object* begin, Object* end) {
  immune_begin_ = begin;
  immune_end_ = end;
}

void MarkSweep::FindDefaultMarkBitmap() {
  TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_mark_bitmap_ = bitmap;
      CHECK(current_mark_bitmap_ != NULL);
      return;
    }
  }
  GetHeap()->DumpSpaces();
  LOG(FATAL) << "Could not find a default mark bitmap";
}

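// The mark stack is grown by copying: allocate the larger stack, then push the saved entries
// back. Parallel markers serialize expansion on mark_stack_lock_, and the early return in
// ResizeMarkStack handles losing that race to another thread.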
void MarkSweep::ExpandMarkStack() {
  ResizeMarkStack(mark_stack_->Capacity() * 2);
}

void MarkSweep::ResizeMarkStack(size_t new_size) {
  // Rare case, no need to have Thread::Current be a parameter.
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj) {
  DCHECK(obj != NULL);
  if (MarkObjectParallel(obj)) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(const_cast<Object*>(obj));
  }
}

mirror::Object* MarkSweep::RecursiveMarkObjectCallback(mirror::Object* obj, void* arg) {
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObject(obj);
  mark_sweep->ProcessMarkStack(true);
  return obj;
}

inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) {
  DCHECK(!IsImmune(obj));
  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, false);
      return;
    }
  }

  DCHECK(object_bitmap->HasAddress(obj));
  object_bitmap->Clear(obj);
}

inline void MarkSweep::MarkObjectNonNull(const Object* obj) {
  DCHECK(obj != NULL);

  if (IsImmune(obj)) {
    DCHECK(IsMarked(obj));
    return;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, true);
      return;
    }
  }

  // This object was not previously marked.
  if (!object_bitmap->Test(obj)) {
    object_bitmap->Set(obj);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      // Lock is not needed but is here anyways to please annotalysis.
      MutexLock mu(Thread::Current(), mark_stack_lock_);
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(const_cast<Object*>(obj));
  }
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool MarkSweep::MarkLargeObject(const Object* obj, bool set) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects();
  if (kProfileLargeObjects) {
    ++large_object_test_;
  }
  if (UNLIKELY(!large_objects->Test(obj))) {
    if (!large_object_space->Contains(obj)) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      VerifyRoots();
      LOG(FATAL) << "Can't mark bad root";
    }
    if (kProfileLargeObjects) {
      ++large_object_mark_;
    }
    if (set) {
      large_objects->Set(obj);
    } else {
      large_objects->Clear(obj);
    }
    return true;
  }
  return false;
}

inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != NULL);

  if (IsImmune(obj)) {
    DCHECK(IsMarked(obj));
    return false;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (new_bitmap != NULL) {
      object_bitmap = new_bitmap;
    } else {
      // TODO: Remove the Thread::Current here?
      // TODO: Convert this to some kind of atomic marking?
      MutexLock mu(Thread::Current(), large_object_lock_);
      return MarkLargeObject(obj, true);
    }
  }

  // Return true if the object was not previously marked.
  return !object_bitmap->AtomicTestAndSet(obj);
}

// Used to mark objects when recursing. Recursion is done by moving
// the finger across the bitmaps in address order and marking child
// objects. Any newly-marked objects whose addresses are lower than
// the finger won't be visited by the bitmap scan, so those objects
// need to be added to the mark stack.
inline void MarkSweep::MarkObject(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj);
  }
}

void MarkSweep::MarkRoot(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj);
  }
}

mirror::Object* MarkSweep::MarkRootParallelCallback(mirror::Object* root, void* arg,
                                                    uint32_t /*thread_id*/,
                                                    RootType /*root_type*/) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(root);
  return root;
}

Object* MarkSweep::MarkRootCallback(Object* root, void* arg, uint32_t /*thread_id*/,
                                    RootType /*root_type*/) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(root);
  return root;
}

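// Root verification, used on the failure path in MarkLargeObject: report any root that is not
// covered by a live bitmap or the large object space, including the vreg and stack location
// when a StackVisitor is available.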
void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) {
  // See if the root is on any space bitmap.
  if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == NULL) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root;
      if (visitor != NULL) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

// Marks all objects in the root set.
void MarkSweep::MarkRoots() {
  timings_.StartSplit("MarkRoots");
  Runtime::Current()->VisitNonConcurrentRoots(MarkRootCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkNonThreadRoots() {
  timings_.StartSplit("MarkNonThreadRoots");
  Runtime::Current()->VisitNonThreadRoots(MarkRootCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkConcurrentRoots() {
  timings_.StartSplit("MarkConcurrentRoots");
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, false, true);
  timings_.EndSplit();
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
      : mark_sweep_(mark_sweep) {}

  // TODO: Fix this when annotalysis works with visitors.
  void operator()(Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

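// A MarkStackTask drains a fixed-size, task-local mark stack. On overflow, MarkStackPush hands
// the top half of the stack to the thread pool as a new task, giving a simple form of dynamic
// load balancing between GC worker threads.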
template <bool kUseFinger = false>
class MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
                const Object** mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != NULL);
      // TODO: Check performance?
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  static const size_t kMaxSize = 1 * KB;

 protected:
  class ScanObjectParallelVisitor {
   public:
    explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
        : chunk_task_(chunk_task) {}

    void operator()(Object* obj) const {
      MarkSweep* mark_sweep = chunk_task_->mark_sweep_;
      mark_sweep->ScanObjectVisit(obj,
          [mark_sweep, this](Object* /* obj */, Object* ref, const MemberOffset& /* offset */,
                             bool /* is_static */) ALWAYS_INLINE_LAMBDA {
            if (ref != nullptr && mark_sweep->MarkObjectParallel(ref)) {
              if (kUseFinger) {
                android_memory_barrier();
                if (reinterpret_cast<uintptr_t>(ref) >=
                    static_cast<uintptr_t>(mark_sweep->atomic_finger_)) {
                  return;
                }
              }
              chunk_task_->MarkStackPush(ref);
            }
          });
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread local mark stack for this task.
  const Object* mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

  void MarkStackPush(const Object* obj) ALWAYS_INLINE {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK(mark_stack_pos_ < kMaxSize);
    mark_stack_[mark_stack_pos_++] = obj;
  }

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) {
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<const Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      const Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          const Object* obj = mark_stack_[--mark_stack_pos_];
          DCHECK(obj != nullptr);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_];
      }
      DCHECK(obj != nullptr);
      visitor(const_cast<mirror::Object*>(obj));
    }
  }
};

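// Scans the objects on cards whose value is at least minimum_age_ within [begin_, end_) against
// one space's mark bitmap, then drains the local mark stack inherited from MarkStackTask.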
class CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, accounting::SpaceBitmap* bitmap,
               byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
               const Object** mark_stack_obj)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age) {
  }

 protected:
  accounting::SpaceBitmap* const bitmap_;
  byte* const begin_;
  byte* const end_;
  const byte minimum_age_;

  virtual void Finalize() {
    delete this;
  }

  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    size_t cards_scanned = card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_);
    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
               << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

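// Returns the number of threads to use, counting the calling thread; zero means the parallel
// paths are skipped entirely (no thread pool configured, or CareAboutPauseTimes() is false).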
size_t MarkSweep::GetThreadCount(bool paused) const {
  if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
    return 0;
  }
  if (paused) {
    return heap_->GetParallelGCThreadCount() + 1;
  } else {
    return heap_->GetConcGCThreadCount() + 1;
  }
}

void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  size_t thread_count = GetThreadCount(paused);
  // The parallel version with only one thread is faster for card scanning, TODO: fix.
  if (kParallelCardScan && thread_count > 0) {
    Thread* self = Thread::Current();
    // Can't have a different split for each space since multiple spaces can have their cards being
    // scanned at the same time.
    timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects");
    // Try to take some of the mark stack since we can pass this off to the worker tasks.
    const Object** mark_stack_begin = const_cast<const Object**>(mark_stack_->Begin());
    const Object** mark_stack_end = const_cast<const Object**>(mark_stack_->End());
    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
    // Estimated number of work tasks we will create.
    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
    DCHECK_NE(mark_stack_tasks, 0U);
    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                             mark_stack_size / mark_stack_tasks + 1);
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() == nullptr) {
        continue;
      }
      byte* card_begin = space->Begin();
      byte* card_end = space->End();
      // Align up the end address. For example, the image space's end
      // may not be card-size-aligned.
      card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin));
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end));
      // Calculate how many bytes of heap we will scan.
      const size_t address_range = card_end - card_begin;
      // Calculate how much address range each task gets.
      const size_t card_delta = RoundUp(address_range / thread_count + 1,
                                        accounting::CardTable::kCardSize);
      // Create the worker tasks for this space.
      while (card_begin != card_end) {
        // Add a range of cards.
        size_t addr_remaining = card_end - card_begin;
        size_t card_increment = std::min(card_delta, addr_remaining);
        // Take from the back of the mark stack.
        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
        mark_stack_end -= mark_stack_increment;
        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
        DCHECK_EQ(mark_stack_end, mark_stack_->End());
        // Add the new task to the thread pool.
        auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
                                      card_begin + card_increment, minimum_age,
                                      mark_stack_increment, mark_stack_end);
        thread_pool->AddTask(self, task);
        card_begin += card_increment;
      }
    }

    // Note: the card scan below may dirty new cards (and scan them)
    // as a side effect when a Reference object is encountered and
    // queued during the marking. See b/11465268.
    thread_pool->SetMaxActiveWorkers(thread_count - 1);
    thread_pool->StartWorkers(self);
    thread_pool->Wait(self, true, true);
    thread_pool->StopWorkers(self);
    timings_.EndSplit();
  } else {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() != nullptr) {
        // Image spaces are handled properly since live == marked for them.
        switch (space->GetGcRetentionPolicy()) {
          case space::kGcRetentionPolicyNeverCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
                "ScanGrayImageSpaceObjects");
            break;
          case space::kGcRetentionPolicyFullCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
                "ScanGrayZygoteSpaceObjects");
            break;
          case space::kGcRetentionPolicyAlwaysCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
                "ScanGrayAllocSpaceObjects");
            break;
        }
        ScanObjectVisitor visitor(this);
        card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor,
                         minimum_age);
        timings_.EndSplit();
      }
    }
  }
}

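// Visits the marked range [begin_, end_) of one bitmap segment, scanning each marked object in
// parallel with the other RecursiveMarkTasks, then drains the local mark stack.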
class RecursiveMarkTask : public MarkStackTask<false> {
 public:
  RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
                    accounting::SpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
      : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL),
        bitmap_(bitmap),
        begin_(begin),
        end_(end) {
  }

 protected:
  accounting::SpaceBitmap* const bitmap_;
  const uintptr_t begin_;
  const uintptr_t end_;

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    bitmap_->VisitMarkedRange(begin_, end_, visitor);
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  TimingLogger::ScopedSplit split("RecursiveMark", &timings_);
  // RecursiveMark will build the lists of known instances of the Reference classes. See
  // DelayReferenceReferent for details.
  if (kUseRecursiveMark) {
    const bool partial = GetGcType() == kGcTypePartial;
    ScanObjectVisitor scan_visitor(this);
    auto* self = Thread::Current();
    ThreadPool* thread_pool = heap_->GetThreadPool();
    size_t thread_count = GetThreadCount(false);
    const bool parallel = kParallelRecursiveMark && thread_count > 1;
    mark_stack_->Reset();
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_mark_bitmap_ = space->GetMarkBitmap();
        if (current_mark_bitmap_ == nullptr) {
          continue;
        }
        if (parallel) {
          // We will use the mark stack in the future.
          // CHECK(mark_stack_->IsEmpty());
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          atomic_finger_ = static_cast<int32_t>(0xFFFFFFFF);

          // Create a few worker tasks.
          const size_t n = thread_count * 2;
          while (begin != end) {
            uintptr_t start = begin;
            uintptr_t delta = (end - begin) / n;
            delta = RoundUp(delta, KB);
            if (delta < 16 * KB) delta = end - begin;
            begin += delta;
            auto* task = new RecursiveMarkTask(thread_pool, this, current_mark_bitmap_, start,
                                               begin);
            thread_pool->AddTask(self, task);
          }
          thread_pool->SetMaxActiveWorkers(thread_count - 1);
          thread_pool->StartWorkers(self);
          thread_pool->Wait(self, true, true);
          thread_pool->StopWorkers(self);
        } else {
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
        }
      }
    }
  }
  ProcessMarkStack(false);
}

mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) {
  if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
    return object;
  }
  return nullptr;
}

void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
  ScanGrayObjects(paused, minimum_age);
  ProcessMarkStack(paused);
}

void MarkSweep::ReMarkRoots() {
  timings_.StartSplit("ReMarkRoots");
  Runtime::Current()->VisitRoots(MarkRootCallback, this, true, true);
  timings_.EndSplit();
}

void MarkSweep::SweepSystemWeaks() {
  Runtime* runtime = Runtime::Current();
  timings_.StartSplit("SweepSystemWeaks");
  runtime->SweepSystemWeaks(IsMarkedCallback, this);
  timings_.EndSplit();
}

mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return "marked".
  return obj;
}

void MarkSweep::VerifyIsLive(const Object* obj) {
  Heap* heap = GetHeap();
  if (!heap->GetLiveBitmap()->Test(obj)) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->GetLiveObjects()->Test(obj)) {
      if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
          heap->allocation_stack_->End()) {
        // Object not found!
        heap->DumpSpaces();
        LOG(FATAL) << "Found dead object " << obj;
      }
    }
  }
}

void MarkSweep::VerifySystemWeaks() {
  // Verify system weaks, uses a special object visitor which returns the input object.
  Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
}

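// Checkpoint closure run on each mutator thread: the thread (or a GC thread acting on its
// behalf if it is suspended) marks its own roots, then passes the barrier the collector is
// waiting on in MarkRootsCheckpoint.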
class CheckpointMarkThreadRoots : public Closure {
 public:
  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
    ATRACE_BEGIN("Marking thread roots");
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    ATRACE_END();
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* mark_sweep_;
};

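// Requests the checkpoint above on all threads, then releases the heap bitmap and mutator
// locks while waiting on the barrier so that mutator threads can run the checkpoint and pass it.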
void MarkSweep::MarkRootsCheckpoint(Thread* self) {
  CheckpointMarkThreadRoots check_point(this);
  timings_.StartSplit("MarkRootsCheckpoint");
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request the check point is run on all threads returning a count of the threads that must
  // run through the barrier including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // TODO: optimize to not release locks when there are no threads to wait for.
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun);
  CHECK_EQ(old_state, kWaitingPerformingGc);
  gc_barrier_->Increment(self, barrier_count);
  self->SetState(kWaitingPerformingGc);
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
  timings_.EndSplit();
}

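// Sweeps only the objects recorded in the allocation stack rather than walking the heap bitmaps
// (see the verification path in HandleDirtyObjectsPhase). Unmarked objects are freed in batches
// of kSweepArrayChunkFreeSize to amortize the cost of each FreeList call.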
Ian Rogers1d54e732013-05-02 21:10:01 -07001025void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001026 timings_.StartSplit("SweepArray");
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001027 Thread* self = Thread::Current();
1028 mirror::Object* chunk_free_buffer[kSweepArrayChunkFreeSize];
1029 size_t chunk_free_pos = 0;
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001030 size_t freed_bytes = 0;
1031 size_t freed_large_object_bytes = 0;
Hiroshi Yamauchib22a4512013-08-13 15:03:22 -07001032 size_t freed_objects = 0;
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001033 size_t freed_large_objects = 0;
  // How many objects are left in the array, modified after each space is swept.
  Object** objects = const_cast<Object**>(allocations->Begin());
  size_t count = allocations->Size();
  // Change the order to ensure that the non-moving space is swept last as an optimization.
  std::vector<space::ContinuousSpace*> sweep_spaces;
  space::ContinuousSpace* non_moving_space = nullptr;
  for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
    if (space->IsAllocSpace() && !IsImmuneSpace(space) && space->GetLiveBitmap() != nullptr) {
      if (space == heap_->GetNonMovingSpace()) {
        non_moving_space = space;
      } else {
        sweep_spaces.push_back(space);
      }
    }
  }
  // We are unlikely to sweep a significant number of non-moving objects, so we sweep that space
  // after the other alloc spaces as an optimization.
  if (non_moving_space != nullptr) {
    sweep_spaces.push_back(non_moving_space);
  }
  // Start by sweeping the continuous spaces.
  for (space::ContinuousSpace* space : sweep_spaces) {
    space::AllocSpace* alloc_space = space->AsAllocSpace();
    accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (swap_bitmaps) {
      std::swap(live_bitmap, mark_bitmap);
    }
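    // Example (hypothetical values): if objects = {a, b, c} and only b belongs to this space and
    // is unmarked, then b is queued in chunk_free_buffer while the array is compacted in place to
    // {a, c}, so count becomes 2 for the next space.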
    Object** out = objects;
    for (size_t i = 0; i < count; ++i) {
      Object* obj = objects[i];
      if (space->HasAddress(obj)) {
        // This object is in the space, remove it from the array and add it to the sweep buffer
        // if needed.
        if (!mark_bitmap->Test(obj)) {
          if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
            timings_.StartSplit("FreeList");
            freed_objects += chunk_free_pos;
            freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
            timings_.EndSplit();
            chunk_free_pos = 0;
          }
          chunk_free_buffer[chunk_free_pos++] = obj;
        }
      } else {
        *(out++) = obj;
      }
    }
    if (chunk_free_pos > 0) {
      timings_.StartSplit("FreeList");
      freed_objects += chunk_free_pos;
      freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
      timings_.EndSplit();
      chunk_free_pos = 0;
    }
    // All of the references which the space contained are no longer in the allocation stack;
    // update the count.
    count = out - objects;
  }
  // Handle the large object space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects();
  accounting::ObjectSet* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  for (size_t i = 0; i < count; ++i) {
    Object* obj = objects[i];
    // Handle large objects.
    if (!large_mark_objects->Test(obj)) {
      ++freed_large_objects;
      freed_large_object_bytes += large_object_space->Free(self, obj);
    }
  }
  timings_.EndSplit();

  timings_.StartSplit("RecordFree");
  VLOG(heap) << "Freed " << freed_objects << "/" << count
             << " objects with size " << PrettySize(freed_bytes);
  heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes + freed_large_object_bytes);
  freed_objects_.FetchAndAdd(freed_objects);
  freed_large_objects_.FetchAndAdd(freed_large_objects);
  freed_bytes_.FetchAndAdd(freed_bytes);
  freed_large_object_bytes_.FetchAndAdd(freed_large_object_bytes);
  timings_.EndSplit();

  timings_.StartSplit("ResetStack");
  allocations->Reset();
  timings_.EndSplit();
}

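// Sweeps unmarked objects out of every continuous mem-map alloc space by walking the live and
// mark bitmaps, then sweeps the large object space.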
void MarkSweep::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  TimingLogger::ScopedSplit split("Sweep", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      TimingLogger::ScopedSplit split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", &timings_);
      size_t freed_objects = 0;
      size_t freed_bytes = 0;
      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
      heap_->RecordFree(freed_objects, freed_bytes);
      freed_objects_.FetchAndAdd(freed_objects);
      freed_bytes_.FetchAndAdd(freed_bytes);
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  GetHeap()->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
  freed_large_objects_.FetchAndAdd(freed_objects);
  freed_large_object_bytes_.FetchAndAdd(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference. If the
// referent has not yet been marked, put it on the appropriate list in
// the heap for later processing.
void MarkSweep::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
  DCHECK(klass != nullptr);
  DCHECK(klass->IsReferenceClass());
  DCHECK(obj != nullptr);
  heap_->DelayReferenceReferent(klass, obj, IsMarkedCallback, this);
}

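// Visitor passed to ScanObjectVisit; marks every reference field of the visited object.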
class MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {}

  // TODO: Fix this when annotalysis works with visitors.
  void operator()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */,
                  bool /* is_static */) const ALWAYS_INLINE
      NO_THREAD_SAFETY_ANALYSIS {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(ref);
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans an object reference. Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(Object* obj) {
  MarkObjectVisitor visitor(this);
  ScanObjectVisit(obj, visitor);
}

void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
                                     static_cast<size_t>(MarkStackTask<false>::kMaxSize));
  CHECK_GT(chunk_size, 0U);
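  // Example (hypothetical sizes): with a 10000-entry mark stack and thread_count == 4, each
  // chunk is min(2501, kMaxSize) entries, so the loop below creates four MarkStackTasks
  // (assuming kMaxSize >= 2501).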
  // Split the current mark stack up into work tasks.
  for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) {
    const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
    thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta,
                                                        const_cast<const mirror::Object**>(it)));
    it += delta;
  }
  thread_pool->SetMaxActiveWorkers(thread_count - 1);
  thread_pool->StartWorkers(self);
  thread_pool->Wait(self, true, true);
  thread_pool->StopWorkers(self);
  mark_stack_->Reset();
  CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
}

// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack(bool paused) {
  timings_.StartSplit("ProcessMarkStack");
  size_t thread_count = GetThreadCount(paused);
  if (kParallelProcessMarkStack && thread_count > 1 &&
      mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
    ProcessMarkStackParallel(thread_count);
  } else {
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
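    // A small FIFO keeps a prefetch window: __builtin_prefetch is issued for each object a few
    // pops before ScanObject dereferences it, hiding some of the cache-miss latency.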
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
          Object* mark_stack_obj = mark_stack_->PopBack();
          DCHECK(mark_stack_obj != nullptr);
          __builtin_prefetch(mark_stack_obj);
          prefetch_fifo.push_back(mark_stack_obj);
        }
        if (prefetch_fifo.empty()) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (mark_stack_->IsEmpty()) {
          break;
        }
        obj = mark_stack_->PopBack();
      }
      DCHECK(obj != nullptr);
      ScanObject(obj);
    }
  }
  timings_.EndSplit();
}

inline bool MarkSweep::IsMarked(const Object* object) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  if (IsImmune(object)) {
    return true;
  }
  DCHECK(current_mark_bitmap_ != nullptr);
  if (current_mark_bitmap_->HasAddress(object)) {
    return current_mark_bitmap_->Test(object);
  }
  return heap_->GetMarkBitmap()->Test(object);
}

void MarkSweep::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  // Can't enqueue references if we hold the mutator lock.
  Heap* heap = GetHeap();
  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);

  timings_.NewSplit("RequestHeapTrim");
  heap->RequestHeapTrim();

  // Update the cumulative statistics.
  total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
  total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  if (kCountScannedTypes) {
    VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
             << " other=" << other_count_;
  }

  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
  }

  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
  }

  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
  }

  if (kCountClassesMarked) {
    VLOG(gc) << "Classes marked " << classes_marked_;
  }

  if (kCountJavaLangRefs) {
    VLOG(gc) << "References scanned " << reference_count_;
  }

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      bitmap->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();
}

}  // namespace collector
}  // namespace gc
}  // namespace art