/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"

using ::art::mirror::ArtField;
using ::art::mirror::Class;
using ::art::mirror::Object;
using ::art::mirror::ObjectArray;

namespace art {
namespace gc {
namespace collector {

// Performance options.
static constexpr bool kUseRecursiveMark = false;
static constexpr bool kUseMarkStackPrefetch = true;
static constexpr size_t kSweepArrayChunkFreeSize = 1024;
static constexpr bool kPreCleanCards = true;

// Parallelism options.
static constexpr bool kParallelCardScan = true;
static constexpr bool kParallelRecursiveMark = true;
// Don't attempt to parallelize mark stack processing unless the mark stack is at least n
// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc. Not
// having this can add overhead in ProcessReferences since we may end up doing many calls of
// ProcessMarkStack with very small mark stacks.
static constexpr size_t kMinimumParallelMarkStackSize = 128;
static constexpr bool kParallelProcessMarkStack = true;

// Profiling and information flags.
static constexpr bool kProfileLargeObjects = false;
static constexpr bool kMeasureOverhead = false;
static constexpr bool kCountTasks = false;
static constexpr bool kCountJavaLangRefs = false;
static constexpr bool kCountMarkedObjects = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
static constexpr bool kCheckLocks = kDebugLocking;
static constexpr bool kVerifyRootsMarked = kIsDebugBuild;

// If true, revoke the rosalloc thread-local buffers at the
// checkpoint, as opposed to during the pause.
static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true;

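// Adds every space we never collect (kGcRetentionPolicyNeverCollect, e.g. the image space) to the
// immune region; MarkObject treats objects in immune spaces as already marked.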
void MarkSweep::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
    }
  }
  timings_.EndSplit();
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix +
                       (is_concurrent ? "concurrent mark sweep" : "mark sweep")),
      gc_barrier_(new Barrier(0)),
      large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
      is_concurrent_(is_concurrent) {
}

void MarkSweep::InitializePhase() {
  timings_.Reset();
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  immune_region_.Reset();
  class_count_ = 0;
  array_count_ = 0;
  other_count_ = 0;
  large_object_test_ = 0;
  large_object_mark_ = 0;
  overhead_time_ = 0;
  work_chunks_created_ = 0;
  work_chunks_deleted_ = 0;
  reference_count_ = 0;
  mark_null_count_ = 0;
  mark_immune_count_ = 0;
  mark_fastpath_count_ = 0;
  mark_slowpath_count_ = 0;
  FindDefaultSpaceBitmap();
  {
    // TODO: I don't think we should need heap bitmap lock to get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }
  if (!clear_soft_references_) {
    // Always clear soft references for non-sticky collections.
    clear_soft_references_ = GetGcType() != collector::kGcTypeSticky;
  }
  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
}

void MarkSweep::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &IsMarkedCallback,
                               &MarkObjectCallback, &ProcessMarkStackPausedCallback, this);
}

void MarkSweep::PreProcessReferences() {
  if (IsConcurrent()) {
    // No reason to do this for non-concurrent GC since pre-processing soft references only helps
    // pauses.
    timings_.NewSplit("PreProcessReferences");
    GetHeap()->ProcessSoftReferences(timings_, clear_soft_references_, &IsMarkedCallback,
                                     &MarkObjectCallback, &ProcessMarkStackPausedCallback, this);
  }
}

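// Runs with mutators suspended. For a concurrent collection this re-marks the roots and rescans
// the cards dirtied during concurrent marking, then processes references and swaps the
// allocation/live stacks so that nothing can allocate into the frozen live stack.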
void MarkSweep::PausePhase() {
  TimingLogger::ScopedSplit split("(Paused)PausePhase", &timings_);
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  if (IsConcurrent()) {
    // Handle the dirty objects if we are a concurrent GC.
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Re-mark root set.
    ReMarkRoots();
    // Scan dirty objects; this is only required for concurrent GC since mutators may have
    // dirtied cards while marking was in progress.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }
  ProcessReferences(self);
  {
    timings_.NewSplit("SwapStacks");
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->SwapStacks(self);
    live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
    // Need to revoke all the thread local allocation stacks since we just swapped the allocation
    // stacks and don't want anybody to allocate into the live stack.
    RevokeAllThreadLocalAllocationStacks(self);
  }
  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();
  if (IsConcurrent()) {
    // Disallow new system weaks to prevent a race which occurs when someone adds a new system
    // weak before we sweep them. Since this new system weak may not be marked, the GC may
    // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
    // reference to a string that is about to be swept.
    Runtime::Current()->DisallowNewSystemWeaks();
  }
}

void MarkSweep::PreCleanCards() {
  // Don't do this for non-concurrent GCs since they don't have any dirty cards.
  if (kPreCleanCards && IsConcurrent()) {
    Thread* self = Thread::Current();
    CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
    // Process dirty cards and add dirty cards to mod union tables, also ages cards.
    heap_->ProcessCards(timings_, false);
    // The checkpoint root marking is required to avoid a race condition which occurs if the
    // following happens during a reference write:
    // 1. mutator dirties the card (write barrier)
    // 2. GC ages the card (the above ProcessCards call)
    // 3. GC scans the object (the RecursiveMarkDirtyObjects call below)
    // 4. mutator writes the value (corresponding to the write barrier in 1.)
    // This causes the GC to age the card but not necessarily mark the reference which the mutator
    // wrote into the object stored in the card.
    // Having the checkpoint fixes this issue since it ensures that the card mark and the
    // reference write are visible to the GC before the card is scanned (this is due to locks being
    // acquired / released in the checkpoint code).
    // The other roots are also marked to help reduce the pause.
    MarkRootsCheckpoint(self, false);
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
    // Process the newly aged cards.
    RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
    // TODO: Empty allocation stack to reduce the number of objects we need to test / mark as live
    // in the next GC.
  }
}

void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
  if (kUseThreadLocalAllocationStack) {
    timings_.NewSplit("RevokeAllThreadLocalAllocationStacks");
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    heap_->RevokeAllThreadLocalAllocationStacks(self);
  }
}

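// The main marking phase: binds the bitmaps, ages/processes cards, then marks the roots and
// everything reachable from them. For a concurrent collection this runs while mutators are still
// running; PausePhase later catches up with whatever they dirty.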
void MarkSweep::MarkingPhase() {
  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  Thread* self = Thread::Current();

  BindBitmaps();
  FindDefaultSpaceBitmap();

  // Process dirty cards and add dirty cards to mod union tables.
  heap_->ProcessCards(timings_, false);

  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  MarkRoots(self);
  MarkReachableObjects();
  // Pre-clean dirtied cards to reduce pauses.
  PreCleanCards();
  PreProcessReferences();
}

void MarkSweep::UpdateAndMarkModUnion() {
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
          "UpdateAndMarkImageModUnionTable";
      TimingLogger::ScopedSplit split(name, &timings_);
      accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
      CHECK(mod_union_table != nullptr);
      mod_union_table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
    }
  }
}

void MarkSweep::MarkReachableObjects() {
  UpdateAndMarkModUnion();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  Thread* self = Thread::Current();
  SweepSystemWeaks(self);
  if (IsConcurrent()) {
    Runtime::Current()->AllowNewSystemWeaks();
  }
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Reclaim unmarked objects.
    Sweep(false);

    // Swap the live and mark bitmaps for each space which we modified. This is an optimization
    // that enables us to not clear live bits inside of the sweep. Only swaps unbound bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();

    // Unbind the live and mark bitmaps.
    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
    GetHeap()->UnBindBitmaps();
  }
}

void MarkSweep::FindDefaultSpaceBitmap() {
  TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_space_bitmap_ = bitmap;
      return;
    }
  }
  GetHeap()->DumpSpaces();
  LOG(FATAL) << "Could not find a default mark bitmap";
}

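// Doubles the capacity of the mark stack. Only called once a push has found the stack full;
// callers hold mark_stack_lock_, so concurrent expanders are serialized and ResizeMarkStack can
// detect that somebody else already grew the stack.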
void MarkSweep::ExpandMarkStack() {
  ResizeMarkStack(mark_stack_->Capacity() * 2);
}

void MarkSweep::ResizeMarkStack(size_t new_size) {
  // Rare case, no need to have Thread::Current be a parameter.
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void MarkSweep::MarkObjectNonNullParallel(Object* obj) {
  DCHECK(obj != nullptr);
  if (MarkObjectParallel(obj)) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(obj);
  }
}

mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* obj, void* arg) {
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObject(obj);
  return obj;
}

void MarkSweep::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObject(ref->AsMirrorPtr());
}

inline void MarkSweep::MarkObjectNonNull(Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    // Verify all the objects have the correct pointer installed.
    obj->AssertReadBarrierPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    if (kCountMarkedObjects) {
      ++mark_immune_count_;
    }
    DCHECK(IsMarked(obj));
    return;
  }
  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_space_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    object_bitmap = mark_bitmap_->GetContinuousSpaceBitmap(obj);
    if (kCountMarkedObjects) {
      ++mark_slowpath_count_;
    }
    if (UNLIKELY(object_bitmap == nullptr)) {
      MarkLargeObject(obj, true);
      return;
    }
  } else if (kCountMarkedObjects) {
    ++mark_fastpath_count_;
  }
  // Set the mark bit; if the object was not previously marked, push it on the mark stack so that
  // it gets scanned.
  if (!object_bitmap->Set(obj)) {
    PushOnMarkStack(obj);
  }
}

inline void MarkSweep::PushOnMarkStack(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    // Lock is not needed but is here anyways to please annotalysis.
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    ExpandMarkStack();
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool MarkSweep::MarkLargeObject(const Object* obj, bool set) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects();
  if (kProfileLargeObjects) {
    ++large_object_test_;
  }
  if (UNLIKELY(!large_objects->Test(obj))) {
    if (!large_object_space->Contains(obj)) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      VerifyRoots();
      LOG(FATAL) << "Can't mark bad root";
    }
    if (kProfileLargeObjects) {
      ++large_object_mark_;
    }
    if (set) {
      large_objects->Set(obj);
    } else {
      large_objects->Clear(obj);
    }
    return true;
  }
  return false;
}

inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    // Verify all the objects have the correct pointer installed.
    obj->AssertReadBarrierPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    DCHECK(IsMarked(obj));
    return false;
  }
  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_space_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = mark_bitmap_->GetContinuousSpaceBitmap(obj);
    if (new_bitmap != nullptr) {
      object_bitmap = new_bitmap;
    } else {
      // TODO: Remove the Thread::Current here?
      // TODO: Convert this to some kind of atomic marking?
      MutexLock mu(Thread::Current(), large_object_lock_);
      return MarkLargeObject(obj, true);
    }
  }
  // Return true if the object was not previously marked.
  return !object_bitmap->AtomicTestAndSet(obj);
}

// Used to mark objects when processing the mark stack. If an object is null, it is not marked.
inline void MarkSweep::MarkObject(Object* obj) {
  if (obj != nullptr) {
    MarkObjectNonNull(obj);
  } else if (kCountMarkedObjects) {
    ++mark_null_count_;
  }
}

void MarkSweep::MarkRootParallelCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                         RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(*root);
}

void MarkSweep::VerifyRootMarked(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  CHECK(reinterpret_cast<MarkSweep*>(arg)->IsMarked(*root));
}

void MarkSweep::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(*root);
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor, RootType root_type) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor, root_type);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor,
                           RootType root_type) {
  // See if the root is on any space bitmap.
  if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root << " with type " << root_type;
      if (visitor != nullptr) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

void MarkSweep::MarkRoots(Thread* self) {
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    timings_.StartSplit("MarkRoots");
    Runtime::Current()->VisitRoots(MarkRootCallback, this);
    timings_.EndSplit();
    RevokeAllThreadLocalAllocationStacks(self);
  } else {
    MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint);
    // At this point the live stack should no longer have any mutators which push into it.
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
  }
}

void MarkSweep::MarkNonThreadRoots() {
  timings_.StartSplit("MarkNonThreadRoots");
  Runtime::Current()->VisitNonThreadRoots(MarkRootCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
  timings_.StartSplit("MarkConcurrentRoots");
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, flags);
  timings_.EndSplit();
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
      : mark_sweep_(mark_sweep) {}

  void operator()(Object* obj) const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

class DelayReferenceReferentVisitor {
 public:
  explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  MarkSweep* const collector_;
};

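// A thread pool task with its own fixed-size mark stack. Each task pops objects from its local
// stack and scans them, pushing newly marked references back onto the local stack; when the local
// stack overflows, half of it is handed to the thread pool as a new task (see MarkStackPush).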
template <bool kUseFinger = false>
class MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
                Object** mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != nullptr);
      // TODO: Check performance?
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  static const size_t kMaxSize = 1 * KB;

 protected:
  class MarkObjectParallelVisitor {
   public:
    explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
                                       MarkSweep* mark_sweep) ALWAYS_INLINE
        : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}

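    // Note: when kUseFinger is enabled, newly marked references at or above atomic_finger_ are
    // deliberately not pushed; the in-progress bitmap scan has not reached that address yet, so
    // it will visit them anyway (the classic marking-finger optimization, see RecursiveMark).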
    void operator()(Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset, false);
      if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
        if (kUseFinger) {
          android_memory_barrier();
          if (reinterpret_cast<uintptr_t>(ref) >=
              static_cast<uintptr_t>(mark_sweep_->atomic_finger_)) {
            return;
          }
        }
        chunk_task_->MarkStackPush(ref);
      }
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
    MarkSweep* const mark_sweep_;
  };

  class ScanObjectParallelVisitor {
   public:
    explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
        : chunk_task_(chunk_task) {}

    // No thread safety analysis since multiple threads will use this visitor.
    void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
      MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
      MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
      DelayReferenceReferentVisitor ref_visitor(mark_sweep);
      mark_sweep->ScanObjectVisit(obj, mark_visitor, ref_visitor);
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread local mark stack for this task.
  Object* mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

  void MarkStackPush(Object* obj) ALWAYS_INLINE {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK_LT(mark_stack_pos_, kMaxSize);
    mark_stack_[mark_stack_pos_++] = obj;
  }

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects on the local mark stack, optionally prefetching a few entries ahead.
  virtual void Run(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          Object* obj = mark_stack_[--mark_stack_pos_];
          DCHECK(obj != nullptr);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_];
      }
      DCHECK(obj != nullptr);
      visitor(obj);
    }
  }
};

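// Scans the objects covered by cards in [begin_, end_) whose value is at least minimum_age_,
// marking everything they reference, then drains the local mark stack inherited from
// MarkStackTask.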
class CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, accounting::SpaceBitmap* bitmap,
               byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
               Object** mark_stack_obj)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age) {
  }

 protected:
  accounting::SpaceBitmap* const bitmap_;
  byte* const begin_;
  byte* const end_;
  const byte minimum_age_;

  virtual void Finalize() {
    delete this;
  }

  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    size_t cards_scanned = card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_);
    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
        << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

size_t MarkSweep::GetThreadCount(bool paused) const {
  if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
    return 0;
  }
  if (paused) {
    return heap_->GetParallelGCThreadCount() + 1;
  } else {
    return heap_->GetConcGCThreadCount() + 1;
  }
}

void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  size_t thread_count = GetThreadCount(paused);
  // The parallel version with only one thread is faster for card scanning, TODO: fix.
  if (kParallelCardScan && thread_count > 0) {
    Thread* self = Thread::Current();
    // Can't have a different split for each space since multiple spaces can have their cards being
    // scanned at the same time.
    timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects");
    // Try to take some of the mark stack since we can pass this off to the worker tasks.
    Object** mark_stack_begin = mark_stack_->Begin();
    Object** mark_stack_end = mark_stack_->End();
    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
    // Estimated number of work tasks we will create.
    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
    DCHECK_NE(mark_stack_tasks, 0U);
    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                             mark_stack_size / mark_stack_tasks + 1);
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() == nullptr) {
        continue;
      }
      byte* card_begin = space->Begin();
      byte* card_end = space->End();
      // Align up the end address. For example, the image space's end
      // may not be card-size-aligned.
      card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin));
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end));
      // Calculate how many bytes of heap we will scan.
      const size_t address_range = card_end - card_begin;
      // Calculate how much address range each task gets.
      const size_t card_delta = RoundUp(address_range / thread_count + 1,
                                        accounting::CardTable::kCardSize);
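      // Rough sizing example (assuming 128-byte cards): for a 64 MB space and a thread_count of
      // 4, card_delta is ~16 MB rounded up to a card boundary, so the space splits into roughly
      // thread_count tasks.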
      // Create the worker tasks for this space.
      while (card_begin != card_end) {
        // Add a range of cards.
        size_t addr_remaining = card_end - card_begin;
        size_t card_increment = std::min(card_delta, addr_remaining);
        // Take from the back of the mark stack.
        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
        mark_stack_end -= mark_stack_increment;
        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
        DCHECK_EQ(mark_stack_end, mark_stack_->End());
        // Add the new task to the thread pool.
        auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
                                      card_begin + card_increment, minimum_age,
                                      mark_stack_increment, mark_stack_end);
        thread_pool->AddTask(self, task);
        card_begin += card_increment;
      }
    }

    // Note: the card scan below may dirty new cards (and scan them)
    // as a side effect when a Reference object is encountered and
    // queued during the marking. See b/11465268.
    thread_pool->SetMaxActiveWorkers(thread_count - 1);
    thread_pool->StartWorkers(self);
    thread_pool->Wait(self, true, true);
    thread_pool->StopWorkers(self);
    timings_.EndSplit();
  } else {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() != nullptr) {
        // Image spaces are handled properly since live == marked for them.
        switch (space->GetGcRetentionPolicy()) {
          case space::kGcRetentionPolicyNeverCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
                "ScanGrayImageSpaceObjects");
            break;
          case space::kGcRetentionPolicyFullCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
                "ScanGrayZygoteSpaceObjects");
            break;
          case space::kGcRetentionPolicyAlwaysCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
                "ScanGrayAllocSpaceObjects");
            break;
        }
        ScanObjectVisitor visitor(this);
        card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, minimum_age);
        timings_.EndSplit();
      }
    }
  }
}

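// Visits every marked object in the bitmap range [begin_, end_) and scans it, then drains the
// local mark stack.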
class RecursiveMarkTask : public MarkStackTask<false> {
 public:
  RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
                    accounting::SpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
      : MarkStackTask<false>(thread_pool, mark_sweep, 0, nullptr),
        bitmap_(bitmap),
        begin_(begin),
        end_(end) {
  }

 protected:
  accounting::SpaceBitmap* const bitmap_;
  const uintptr_t begin_;
  const uintptr_t end_;

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    bitmap_->VisitMarkedRange(begin_, end_, visitor);
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  TimingLogger::ScopedSplit split("RecursiveMark", &timings_);
  // RecursiveMark will build the lists of known instances of the Reference classes. See
  // DelayReferenceReferent for details.
  if (kUseRecursiveMark) {
    const bool partial = GetGcType() == kGcTypePartial;
    ScanObjectVisitor scan_visitor(this);
    auto* self = Thread::Current();
    ThreadPool* thread_pool = heap_->GetThreadPool();
    size_t thread_count = GetThreadCount(false);
    const bool parallel = kParallelRecursiveMark && thread_count > 1;
    mark_stack_->Reset();
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_space_bitmap_ = space->GetMarkBitmap();
        if (current_space_bitmap_ == nullptr) {
          continue;
        }
        if (parallel) {
          // We will use the mark stack in the future.
          // CHECK(mark_stack_->IsEmpty());
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          atomic_finger_ = static_cast<int32_t>(0xFFFFFFFF);

          // Create a few worker tasks.
          const size_t n = thread_count * 2;
          while (begin != end) {
            uintptr_t start = begin;
            uintptr_t delta = (end - begin) / n;
            delta = RoundUp(delta, KB);
            if (delta < 16 * KB) delta = end - begin;
            begin += delta;
            auto* task = new RecursiveMarkTask(thread_pool, this, current_space_bitmap_, start,
                                               begin);
            thread_pool->AddTask(self, task);
          }
          thread_pool->SetMaxActiveWorkers(thread_count - 1);
          thread_pool->StartWorkers(self);
          thread_pool->Wait(self, true, true);
          thread_pool->StopWorkers(self);
        } else {
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          current_space_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
        }
      }
    }
  }
  ProcessMarkStack(false);
}

mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) {
  if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
    return object;
  }
  return nullptr;
}

void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
  ScanGrayObjects(paused, minimum_age);
  ProcessMarkStack(paused);
}

void MarkSweep::ReMarkRoots() {
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  timings_.StartSplit("(Paused)ReMarkRoots");
  Runtime::Current()->VisitRoots(
      MarkRootCallback, this, static_cast<VisitRootFlags>(kVisitRootFlagNewRoots |
                                                          kVisitRootFlagStopLoggingNewRoots |
                                                          kVisitRootFlagClearRootLog));
  timings_.EndSplit();
  if (kVerifyRootsMarked) {
    timings_.StartSplit("(Paused)VerifyRoots");
    Runtime::Current()->VisitRoots(VerifyRootMarked, this);
    timings_.EndSplit();
  }
}

void MarkSweep::SweepSystemWeaks(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  timings_.StartSplit("SweepSystemWeaks");
  Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
  timings_.EndSplit();
}

mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return "marked".
  return obj;
}

void MarkSweep::VerifyIsLive(const Object* obj) {
  if (!heap_->GetLiveBitmap()->Test(obj)) {
    space::LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace();
    if (!large_object_space->GetLiveObjects()->Test(obj)) {
      if (std::find(heap_->allocation_stack_->Begin(), heap_->allocation_stack_->End(), obj) ==
          heap_->allocation_stack_->End()) {
        // Object not found!
        heap_->DumpSpaces();
        LOG(FATAL) << "Found dead object " << obj;
      }
    }
  }
}

void MarkSweep::VerifySystemWeaks() {
  // Verify system weaks, uses a special object visitor which returns the input object.
  Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
}

class CheckpointMarkThreadRoots : public Closure {
 public:
  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep,
                                     bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
      : mark_sweep_(mark_sweep),
        revoke_ros_alloc_thread_local_buffers_at_checkpoint_(
            revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    ATRACE_BEGIN("Marking thread roots");
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    ATRACE_END();
    if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) {
      ATRACE_BEGIN("RevokeRosAllocThreadLocalBuffers");
      mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
      ATRACE_END();
    }
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* const mark_sweep_;
  const bool revoke_ros_alloc_thread_local_buffers_at_checkpoint_;
};

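// Marks thread roots via a checkpoint: each runnable thread runs CheckpointMarkThreadRoots on
// itself, then passes the barrier. The heap bitmap and mutator locks are released while we block
// on the barrier so the checkpoints can make progress.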
void MarkSweep::MarkRootsCheckpoint(Thread* self,
                                    bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint);
  timings_.StartSplit("MarkRootsCheckpoint");
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the checkpoint be run on all threads, returning a count of the threads that
  // must run through the barrier, including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // TODO: optimize to not release locks when there are no threads to wait for.
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
  timings_.EndSplit();
}

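// Sweep the objects recorded in the given allocation stack instead of walking entire space
// bitmaps. Unmarked objects are accumulated into chunk_free_buffer and freed
// kSweepArrayChunkFreeSize objects at a time, so each FreeList call covers many objects.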
void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  timings_.StartSplit("SweepArray");
  Thread* self = Thread::Current();
  mirror::Object* chunk_free_buffer[kSweepArrayChunkFreeSize];
  size_t chunk_free_pos = 0;
  size_t freed_bytes = 0;
  size_t freed_large_object_bytes = 0;
  size_t freed_objects = 0;
  size_t freed_large_objects = 0;
  Object** objects = allocations->Begin();
  // How many objects are left in the array, modified after each space is swept.
  size_t count = allocations->Size();
  // Change the order to ensure that the non-moving space is swept last as an optimization.
  std::vector<space::ContinuousSpace*> sweep_spaces;
  space::ContinuousSpace* non_moving_space = nullptr;
  for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
    if (space->IsAllocSpace() && !immune_region_.ContainsSpace(space) &&
        space->GetLiveBitmap() != nullptr) {
      if (space == heap_->GetNonMovingSpace()) {
        non_moving_space = space;
      } else {
        sweep_spaces.push_back(space);
      }
    }
  }
  // Unlikely to sweep a significant number of non-moving objects, so we do these after the
  // other alloc spaces as an optimization.
  if (non_moving_space != nullptr) {
    sweep_spaces.push_back(non_moving_space);
  }
  // Start by sweeping the continuous spaces.
  for (space::ContinuousSpace* space : sweep_spaces) {
    space::AllocSpace* alloc_space = space->AsAllocSpace();
    accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
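    // If the caller has already swapped the heap-level live and mark bitmaps, swap the local
    // pointers so that the mark_bitmap tests below consult the bitmap holding this GC's marks.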
    if (swap_bitmaps) {
      std::swap(live_bitmap, mark_bitmap);
    }
    Object** out = objects;
    for (size_t i = 0; i < count; ++i) {
      Object* obj = objects[i];
      if (kUseThreadLocalAllocationStack && obj == nullptr) {
        continue;
      }
      if (space->HasAddress(obj)) {
        // This object is in the space, remove it from the array and add it to the sweep buffer
        // if needed.
        if (!mark_bitmap->Test(obj)) {
          if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
            timings_.StartSplit("FreeList");
            freed_objects += chunk_free_pos;
            freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
            timings_.EndSplit();
            chunk_free_pos = 0;
          }
          chunk_free_buffer[chunk_free_pos++] = obj;
        }
      } else {
        *(out++) = obj;
      }
    }
    if (chunk_free_pos > 0) {
      timings_.StartSplit("FreeList");
      freed_objects += chunk_free_pos;
      freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
      timings_.EndSplit();
      chunk_free_pos = 0;
    }
    // All of the references which the space contained are no longer in the allocation stack;
    // update the count.
    count = out - objects;
  }
  // Handle the large object space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects();
  accounting::ObjectSet* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  for (size_t i = 0; i < count; ++i) {
    Object* obj = objects[i];
    // Handle large objects.
    if (kUseThreadLocalAllocationStack && obj == nullptr) {
      continue;
    }
    if (!large_mark_objects->Test(obj)) {
      ++freed_large_objects;
      freed_large_object_bytes += large_object_space->Free(self, obj);
    }
  }
  timings_.EndSplit();

  timings_.StartSplit("RecordFree");
  VLOG(heap) << "Freed " << freed_objects << "/" << count
             << " objects with size " << PrettySize(freed_bytes);
  heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes + freed_large_object_bytes);
  freed_objects_.FetchAndAdd(freed_objects);
  freed_large_objects_.FetchAndAdd(freed_large_objects);
  freed_bytes_.FetchAndAdd(freed_bytes);
  freed_large_object_bytes_.FetchAndAdd(freed_large_object_bytes);
  timings_.EndSplit();

  timings_.StartSplit("ResetStack");
  allocations->Reset();
  timings_.EndSplit();
}

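// Sweep all unmarked objects out of the continuous mem-map alloc spaces and then the large
// object space. The live stack is marked as live first so that objects allocated while the
// sweep runs concurrently cannot be freed.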
void MarkSweep::Sweep(bool swap_bitmaps) {
  // Ensure that nobody inserted items in the live stack after we swapped the stacks.
  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
  // knowing that new allocations won't be marked as live.
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();

  DCHECK(mark_stack_->IsEmpty());
  TimingLogger::ScopedSplit split("Sweep", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      TimingLogger::ScopedSplit split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", &timings_);
      size_t freed_objects = 0;
      size_t freed_bytes = 0;
      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
      heap_->RecordFree(freed_objects, freed_bytes);
      freed_objects_.FetchAndAdd(freed_objects);
      freed_bytes_.FetchAndAdd(freed_bytes);
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  GetHeap()->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
  freed_large_objects_.FetchAndAdd(freed_objects);
  freed_large_object_bytes_.FetchAndAdd(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) {
  DCHECK(klass != nullptr);
  if (kCountJavaLangRefs) {
    ++reference_count_;
  }
  heap_->DelayReferenceReferent(klass, ref, IsMarkedCallback, this);
}

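// Visitor applied to every reference field of a scanned object; it marks the object that the
// field points to.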
class MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {
  }

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
      ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset, false));
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans an object reference. Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(Object* obj) {
  MarkObjectVisitor mark_visitor(this);
  DelayReferenceReferentVisitor ref_visitor(this);
  ScanObjectVisit(obj, mark_visitor, ref_visitor);
}

void MarkSweep::ProcessMarkStackPausedCallback(void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(true);
}

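// Drain the mark stack with multiple threads by splitting it into chunks of at most
// MarkStackTask<false>::kMaxSize objects and handing each chunk to the GC thread pool.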
void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
                                     static_cast<size_t>(MarkStackTask<false>::kMaxSize));
  CHECK_GT(chunk_size, 0U);
  // Split the current mark stack up into work tasks.
  for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) {
    const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
    thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it));
    it += delta;
  }
  thread_pool->SetMaxActiveWorkers(thread_count - 1);
  thread_pool->StartWorkers(self);
  thread_pool->Wait(self, true, true);
  thread_pool->StopWorkers(self);
  mark_stack_->Reset();
  CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
}

// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack(bool paused) {
  timings_.StartSplit(paused ? "(Paused)ProcessMarkStack" : "ProcessMarkStack");
  size_t thread_count = GetThreadCount(paused);
  if (kParallelProcessMarkStack && thread_count > 1 &&
      mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
    ProcessMarkStackParallel(thread_count);
  } else {
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
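    // Pop up to kFifoSize objects at a time, prefetching each one, and only scan an object once
    // it reaches the front of the FIFO; this hides part of the cache-miss latency of the first
    // access to each object behind the scanning of earlier ones.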
    for (;;) {
      Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
          Object* obj = mark_stack_->PopBack();
          DCHECK(obj != nullptr);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (prefetch_fifo.empty()) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (mark_stack_->IsEmpty()) {
          break;
        }
        obj = mark_stack_->PopBack();
      }
      DCHECK(obj != nullptr);
      ScanObject(obj);
    }
  }
  timings_.EndSplit();
}

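// Returns true if the object is marked. Objects in the immune region are always considered
// marked; otherwise the bitmap of the space currently being processed is tried first, falling
// back to the heap-wide mark bitmap.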
inline bool MarkSweep::IsMarked(const Object* object) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  if (immune_region_.ContainsObject(object)) {
    return true;
  }
  if (current_space_bitmap_->HasAddress(object)) {
    return current_space_bitmap_->Test(object);
  }
  return mark_bitmap_->Test(object);
}

void MarkSweep::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  // Can't enqueue references if we hold the mutator lock.
  timings_.NewSplit("PostGcVerification");
  heap_->PostGcVerification(this);
  if (kCountScannedTypes) {
    VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
             << " other=" << other_count_;
  }
  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
  }
  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
  }
  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
  }
  if (kCountJavaLangRefs) {
    VLOG(gc) << "References scanned " << reference_count_;
  }
  if (kCountMarkedObjects) {
    VLOG(gc) << "Marked: null=" << mark_null_count_ << " immune=" << mark_immune_count_
             << " fastpath=" << mark_fastpath_count_ << " slowpath=" << mark_slowpath_count_;
  }
  CHECK(mark_stack_->IsEmpty());  // Ensure that the mark stack is empty.
  mark_stack_->Reset();
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

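// Revoke thread-local allocation buffers. When running concurrently, rosalloc buffers were
// already revoked at the mark checkpoint, so only the paused path needs to revoke here.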
void MarkSweep::RevokeAllThreadLocalBuffers() {
  if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) {
    // If concurrent, rosalloc thread-local buffers are revoked at the
    // thread checkpoint. Bump pointer space thread-local buffers must
    // not be in use.
    GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
  } else {
    timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
    GetHeap()->RevokeAllThreadLocalBuffers();
    timings_.EndSplit();
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art