/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "monitor.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/reference-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::ArtField;
using ::art::mirror::Class;
using ::art::mirror::Object;
using ::art::mirror::ObjectArray;

namespace art {
namespace gc {
namespace collector {

// Performance options.
static constexpr bool kUseRecursiveMark = false;
static constexpr bool kUseMarkStackPrefetch = true;
static constexpr size_t kSweepArrayChunkFreeSize = 1024;
static constexpr bool kPreCleanCards = true;

71// Parallelism options.
Mathieu Chartiereb7bbad2014-02-25 15:52:46 -080072static constexpr bool kParallelCardScan = true;
73static constexpr bool kParallelRecursiveMark = true;
Mathieu Chartier94c32c52013-08-09 11:14:04 -070074// Don't attempt to parallelize mark stack processing unless the mark stack is at least n
75// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc.. Not
76// having this can add overhead in ProcessReferences since we may end up doing many calls of
77// ProcessMarkStack with very small mark stacks.
Mathieu Chartiereb7bbad2014-02-25 15:52:46 -080078static constexpr size_t kMinimumParallelMarkStackSize = 128;
79static constexpr bool kParallelProcessMarkStack = true;
Mathieu Chartier858f1c52012-10-17 17:45:55 -070080
// Profiling and information flags.
static constexpr bool kProfileLargeObjects = false;
static constexpr bool kMeasureOverhead = false;
static constexpr bool kCountTasks = false;
static constexpr bool kCountJavaLangRefs = false;
static constexpr bool kCountMarkedObjects = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
static constexpr bool kCheckLocks = kDebugLocking;
static constexpr bool kVerifyRoots = kIsDebugBuild;

// If true, revoke the rosalloc thread-local buffers at the
// checkpoint, as opposed to during the pause.
static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true;

void MarkSweep::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
    }
  }
  timings_.EndSplit();
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix +
                       (is_concurrent ? "concurrent mark sweep": "mark sweep")),
      gc_barrier_(new Barrier(0)),
      large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
      is_concurrent_(is_concurrent) {
}

void MarkSweep::InitializePhase() {
  timings_.Reset();
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  immune_region_.Reset();
  class_count_ = 0;
  array_count_ = 0;
  other_count_ = 0;
  large_object_test_ = 0;
  large_object_mark_ = 0;
  overhead_time_ = 0;
  work_chunks_created_ = 0;
  work_chunks_deleted_ = 0;
  reference_count_ = 0;
  mark_null_count_ = 0;
  mark_immune_count_ = 0;
  mark_fastpath_count_ = 0;
  mark_slowpath_count_ = 0;
  FindDefaultSpaceBitmap();
  {
    // TODO: I don't think we should need heap bitmap lock to get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }

  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
}

void MarkSweep::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &IsMarkedCallback,
                               &MarkObjectCallback, &ProcessMarkStackPausedCallback, this);
}

void MarkSweep::PreProcessReferences() {
  if (IsConcurrent()) {
    // No reason to do this for non-concurrent GC since pre-processing soft references only helps
    // reduce pauses.
    timings_.NewSplit("PreProcessReferences");
    GetHeap()->ProcessSoftReferences(timings_, clear_soft_references_, &IsMarkedCallback,
                                     &MarkObjectCallback, &ProcessMarkStackPausedCallback, this);
  }
}

void MarkSweep::HandleDirtyObjectsPhase() {
  TimingLogger::ScopedSplit split("(Paused)HandleDirtyObjectsPhase", &timings_);
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Re-mark root set.
    ReMarkRoots();

    // Scan dirty objects, this is only required if we are not doing concurrent GC.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }

  ProcessReferences(self);

  // Only need to do this if we have the card mark verification on, and only during concurrent GC.
  if (GetHeap()->verify_missing_card_marks_ || GetHeap()->verify_pre_gc_heap_ ||
      GetHeap()->verify_post_gc_heap_) {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // This second sweep makes sure that we don't have any objects in the live stack which point to
    // freed objects. Such objects cause problems since their references may previously have been
    // freed.
    SweepArray(GetHeap()->allocation_stack_.get(), false);
    // SweepArray() above resets the (active) allocation stack, so we need to revoke the
    // thread-local allocation stacks that point into it.
    RevokeAllThreadLocalAllocationStacks(self);
  }

  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  // Ensure that nobody inserted items in the live stack after we swapped the stacks.
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());

  // Disallow new system weaks to prevent a race which occurs when someone adds a new system
  // weak before we sweep them. Since this new system weak may not be marked, the GC may
  // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
  // reference to a string that is about to be swept.
  Runtime::Current()->DisallowNewSystemWeaks();
}

void MarkSweep::PreCleanCards() {
  // Don't do this for non-concurrent GCs since they don't have any dirty cards.
  if (kPreCleanCards && IsConcurrent()) {
    Thread* self = Thread::Current();
    CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
    // Process dirty cards and add dirty cards to mod union tables, also ages cards.
    heap_->ProcessCards(timings_, false);
    // The checkpoint root marking is required to avoid a race condition which occurs if the
    // following happens during a reference write:
    // 1. mutator dirties the card (write barrier)
    // 2. GC ages the card (the above ProcessCards call)
    // 3. GC scans the object (the RecursiveMarkDirtyObjects call below)
    // 4. mutator writes the value (corresponding to the write barrier in 1.)
    // This causes the GC to age the card but not necessarily mark the reference which the mutator
    // wrote into the object stored in the card.
    // Having the checkpoint fixes this issue since it ensures that the card mark and the
    // reference write are visible to the GC before the card is scanned (this is due to locks being
    // acquired / released in the checkpoint code).
    // The other roots are also marked to help reduce the pause.
    MarkThreadRoots(self);
    // TODO: Only mark the dirty roots.
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
    // Process the newly aged cards.
    RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
    // TODO: Empty allocation stack to reduce the number of objects we need to test / mark as live
    // in the next GC.
  }
}

void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
  if (kUseThreadLocalAllocationStack) {
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    heap_->RevokeAllThreadLocalAllocationStacks(self);
  }
}

void MarkSweep::MarkingPhase() {
  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  Thread* self = Thread::Current();

  BindBitmaps();
  FindDefaultSpaceBitmap();

  // Process dirty cards and add dirty cards to mod union tables.
  heap_->ProcessCards(timings_, false);

  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  heap_->SwapStacks(self);

  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  MarkRoots(self);
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
  UpdateAndMarkModUnion();
  MarkReachableObjects();
  // Pre-clean dirtied cards to reduce pauses.
  PreCleanCards();
  PreProcessReferences();
}

void MarkSweep::UpdateAndMarkModUnion() {
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
          "UpdateAndMarkImageModUnionTable";
      TimingLogger::ScopedSplit split(name, &timings_);
      accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
      CHECK(mod_union_table != nullptr);
      mod_union_table->UpdateAndMarkReferences(MarkObjectCallback, this);
    }
  }
}

void MarkSweep::MarkThreadRoots(Thread* self) {
  MarkRootsCheckpoint(self);
}

void MarkSweep::MarkReachableObjects() {
  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
  // knowing that new allocations won't be marked as live.
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

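// ReclaimPhase performs the actual reclamation: reference processing (for non-concurrent
// collections), sweeping of system weaks, sweeping of unmarked objects, and finally swapping and
// unbinding the live/mark bitmaps.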
void MarkSweep::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  Thread* self = Thread::Current();

  if (!IsConcurrent()) {
    ProcessReferences(self);
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }

  if (IsConcurrent()) {
    Runtime::Current()->AllowNewSystemWeaks();

    TimingLogger::ScopedSplit split("UnMarkAllocStack", &timings_);
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
    if (!kPreCleanCards) {
      // The allocation stack contains things allocated since the start of the GC. These may have
      // been marked during this GC meaning they won't be eligible for reclaiming in the next
      // sticky GC. Unmark these objects so that they are eligible for reclaiming in the next
      // sticky GC.
      // There is a race here which is safely handled. Another thread such as the hprof could
      // have flushed the alloc stack after we resumed the threads. This is safe however, since
      // resetting the allocation stack zeros it out with madvise. This means that we will either
      // read NULLs or attempt to unmark a newly allocated object which will not be marked in the
      // first place.
      // We can't do this if we pre-clean cards since we will unmark objects which are no longer on
      // a dirty card since we aged cards during the pre-cleaning process.
      mirror::Object** end = allocation_stack->End();
      for (mirror::Object** it = allocation_stack->Begin(); it != end; ++it) {
        const Object* obj = *it;
        if (obj != nullptr) {
          UnMarkObjectNonNull(obj);
        }
      }
    }
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Reclaim unmarked objects.
    Sweep(false);

    // Swap the live and mark bitmaps for each space which we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();

    // Unbind the live and mark bitmaps.
    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
    GetHeap()->UnBindBitmaps();
  }
}

void MarkSweep::FindDefaultSpaceBitmap() {
  TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_space_bitmap_ = bitmap;
      return;
    }
  }
  GetHeap()->DumpSpaces();
  LOG(FATAL) << "Could not find a default mark bitmap";
}

void MarkSweep::ExpandMarkStack() {
  ResizeMarkStack(mark_stack_->Capacity() * 2);
}

void MarkSweep::ResizeMarkStack(size_t new_size) {
  // Rare case, no need to have Thread::Current be a parameter.
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

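// Used by the parallel marking tasks: marks the object atomically and, if this call newly marked
// it, pushes it onto the shared mark stack under mark_stack_lock_, growing the stack on overflow.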
inline void MarkSweep::MarkObjectNonNullParallel(Object* obj) {
  DCHECK(obj != NULL);
  if (MarkObjectParallel(obj)) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(obj);
  }
}

mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* obj, void* arg) {
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObject(obj);
  return obj;
}

inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) {
  DCHECK(!immune_region_.ContainsObject(obj));
  if (kUseBrooksPointer) {
    // Verify all the objects have the correct Brooks pointer installed.
    obj->AssertSelfBrooksPointer();
  }
  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_space_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = mark_bitmap_->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, false);
      return;
    }
  }
  DCHECK(object_bitmap->HasAddress(obj));
  object_bitmap->Clear(obj);
}

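// Marks a single object. The fast path tests the cached bitmap of the default space
// (current_space_bitmap_); objects outside it fall back to a heap bitmap lookup, and objects with
// no continuous-space bitmap are treated as large objects. Objects that were not already marked
// are pushed onto the mark stack for later scanning.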
inline void MarkSweep::MarkObjectNonNull(Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBrooksPointer) {
    // Verify all the objects have the correct Brooks pointer installed.
    obj->AssertSelfBrooksPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    if (kCountMarkedObjects) {
      ++mark_immune_count_;
    }
    DCHECK(IsMarked(obj));
    return;
  }
  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_space_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    object_bitmap = mark_bitmap_->GetContinuousSpaceBitmap(obj);
    if (kCountMarkedObjects) {
      ++mark_slowpath_count_;
    }
    if (UNLIKELY(object_bitmap == nullptr)) {
      MarkLargeObject(obj, true);
      return;
    }
  } else if (kCountMarkedObjects) {
    ++mark_fastpath_count_;
  }
  // This object was not previously marked.
  if (!object_bitmap->Set(obj)) {
    PushOnMarkStack(obj);
  }
}

inline void MarkSweep::PushOnMarkStack(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    // Lock is not needed but is here anyways to please annotalysis.
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    ExpandMarkStack();
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool MarkSweep::MarkLargeObject(const Object* obj, bool set) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects();
  if (kProfileLargeObjects) {
    ++large_object_test_;
  }
  if (UNLIKELY(!large_objects->Test(obj))) {
    if (!large_object_space->Contains(obj)) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      VerifyRoots();
      LOG(FATAL) << "Can't mark bad root";
    }
    if (kProfileLargeObjects) {
      ++large_object_mark_;
    }
    if (set) {
      large_objects->Set(obj);
    } else {
      large_objects->Clear(obj);
    }
    return true;
  }
  return false;
}

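// Lock-free variant of the mark test used by the parallel tasks. Returns true if this call was
// the one that marked the object (via an atomic test-and-set on the bitmap), so only one worker
// pushes any given object. Large objects still take large_object_lock_.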
inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBrooksPointer) {
    // Verify all the objects have the correct Brooks pointer installed.
    obj->AssertSelfBrooksPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    DCHECK(IsMarked(obj));
    return false;
  }
  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_space_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = mark_bitmap_->GetContinuousSpaceBitmap(obj);
    if (new_bitmap != NULL) {
      object_bitmap = new_bitmap;
    } else {
      // TODO: Remove the Thread::Current here?
      // TODO: Convert this to some kind of atomic marking?
      MutexLock mu(Thread::Current(), large_object_lock_);
      return MarkLargeObject(obj, true);
    }
  }
  // Return true if the object was not previously marked.
  return !object_bitmap->AtomicTestAndSet(obj);
}

// Used to mark objects when processing the mark stack. If an object is null, it is not marked.
inline void MarkSweep::MarkObject(Object* obj) {
  if (obj != nullptr) {
    MarkObjectNonNull(obj);
  } else if (kCountMarkedObjects) {
    ++mark_null_count_;
  }
}

void MarkSweep::MarkRootParallelCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                         RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(*root);
}

void MarkSweep::VerifyRootMarked(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  CHECK(reinterpret_cast<MarkSweep*>(arg)->IsMarked(*root));
}

void MarkSweep::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(*root);
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) {
  // See if the root is on any space bitmap.
  if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == NULL) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root;
      if (visitor != NULL) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

void MarkSweep::MarkRoots(Thread* self) {
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    timings_.StartSplit("MarkRoots");
    Runtime::Current()->VisitRoots(MarkRootCallback, this);
    timings_.EndSplit();
    RevokeAllThreadLocalAllocationStacks(self);
  } else {
    MarkThreadRoots(self);
    // At this point the live stack should no longer have any mutators which push into it.
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
  }
}

void MarkSweep::MarkNonThreadRoots() {
  timings_.StartSplit("MarkNonThreadRoots");
  Runtime::Current()->VisitNonThreadRoots(MarkRootCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
  timings_.StartSplit("MarkConcurrentRoots");
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, flags);
  timings_.EndSplit();
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
      : mark_sweep_(mark_sweep) {}

  // TODO: Fix me when annotalysis works with visitors.
  void operator()(Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

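// A unit of parallel marking work. Each task owns a fixed-size local mark stack (kMaxSize
// entries); when the local stack overflows, half of it is handed to the thread pool as a new
// MarkStackTask. Run() drains the local stack, optionally prefetching upcoming entries through a
// small FIFO to hide cache misses (kUseMarkStackPrefetch).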
template <bool kUseFinger = false>
class MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
                Object** mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != NULL);
      // TODO: Check performance?
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  static const size_t kMaxSize = 1 * KB;

 protected:
  class ScanObjectParallelVisitor {
   public:
    explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
        : chunk_task_(chunk_task) {}

    void operator()(Object* obj) const {
      MarkSweep* mark_sweep = chunk_task_->mark_sweep_;
      mark_sweep->ScanObjectVisit(obj,
          [mark_sweep, this](Object* /* obj */, Object* ref, const MemberOffset& /* offset */,
              bool /* is_static */) ALWAYS_INLINE_LAMBDA {
        if (ref != nullptr && mark_sweep->MarkObjectParallel(ref)) {
          if (kUseFinger) {
            android_memory_barrier();
            if (reinterpret_cast<uintptr_t>(ref) >=
                static_cast<uintptr_t>(mark_sweep->atomic_finger_)) {
              return;
            }
          }
          chunk_task_->MarkStackPush(ref);
        }
      });
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread local mark stack for this task.
  Object* mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

  void MarkStackPush(Object* obj) ALWAYS_INLINE {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK_LT(mark_stack_pos_, kMaxSize);
    mark_stack_[mark_stack_pos_++] = obj;
  }

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) {
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          Object* obj = mark_stack_[--mark_stack_pos_];
          DCHECK(obj != nullptr);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_];
      }
      DCHECK(obj != nullptr);
      visitor(obj);
    }
  }
};

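// A MarkStackTask that first scans a range of dirty cards, marking the objects they cover through
// the given space bitmap, and then drains its local mark stack. Each task may also be seeded with
// a slice of the global mark stack.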
class CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, accounting::SpaceBitmap* bitmap,
               byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
               Object** mark_stack_obj)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age) {
  }

 protected:
  accounting::SpaceBitmap* const bitmap_;
  byte* const begin_;
  byte* const end_;
  const byte minimum_age_;

  virtual void Finalize() {
    delete this;
  }

  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    size_t cards_scanned = card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_);
    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
               << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

size_t MarkSweep::GetThreadCount(bool paused) const {
  if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
    return 0;
  }
  if (paused) {
    return heap_->GetParallelGCThreadCount() + 1;
  } else {
    return heap_->GetConcGCThreadCount() + 1;
  }
}

void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  size_t thread_count = GetThreadCount(paused);
  // The parallel version with only one thread is faster for card scanning. TODO: fix.
  if (kParallelCardScan && thread_count > 0) {
    Thread* self = Thread::Current();
    // Can't have a different split for each space since multiple spaces can have their cards being
    // scanned at the same time.
    timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects");
    // Try to take some of the mark stack since we can pass this off to the worker tasks.
    Object** mark_stack_begin = mark_stack_->Begin();
    Object** mark_stack_end = mark_stack_->End();
    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
    // Estimated number of work tasks we will create.
    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
    DCHECK_NE(mark_stack_tasks, 0U);
    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                             mark_stack_size / mark_stack_tasks + 1);
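    // Illustrative example (assumed numbers, not measured): with 2 continuous spaces and
    // thread_count == 3, mark_stack_tasks == 6. If the global mark stack holds 600 entries, each
    // task is seeded with min(kMaxSize / 2, 600 / 6 + 1) == min(512, 101) == 101 entries.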
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() == nullptr) {
        continue;
      }
      byte* card_begin = space->Begin();
      byte* card_end = space->End();
      // Align up the end address. For example, the image space's end
      // may not be card-size-aligned.
      card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin));
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end));
      // Calculate how many bytes of heap we will scan.
      const size_t address_range = card_end - card_begin;
      // Calculate how much address range each task gets.
      const size_t card_delta = RoundUp(address_range / thread_count + 1,
                                        accounting::CardTable::kCardSize);
      // Create the worker tasks for this space.
      while (card_begin != card_end) {
        // Add a range of cards.
        size_t addr_remaining = card_end - card_begin;
        size_t card_increment = std::min(card_delta, addr_remaining);
        // Take from the back of the mark stack.
        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
        mark_stack_end -= mark_stack_increment;
        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
        DCHECK_EQ(mark_stack_end, mark_stack_->End());
        // Add the new task to the thread pool.
        auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
                                      card_begin + card_increment, minimum_age,
                                      mark_stack_increment, mark_stack_end);
        thread_pool->AddTask(self, task);
        card_begin += card_increment;
      }
    }

    // Note: the card scan below may dirty new cards (and scan them)
    // as a side effect when a Reference object is encountered and
    // queued during the marking. See b/11465268.
    thread_pool->SetMaxActiveWorkers(thread_count - 1);
    thread_pool->StartWorkers(self);
    thread_pool->Wait(self, true, true);
    thread_pool->StopWorkers(self);
    timings_.EndSplit();
  } else {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() != nullptr) {
        // Image spaces are handled properly since live == marked for them.
        switch (space->GetGcRetentionPolicy()) {
          case space::kGcRetentionPolicyNeverCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
                "ScanGrayImageSpaceObjects");
            break;
          case space::kGcRetentionPolicyFullCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
                "ScanGrayZygoteSpaceObjects");
            break;
          case space::kGcRetentionPolicyAlwaysCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
                "ScanGrayAllocSpaceObjects");
            break;
        }
        ScanObjectVisitor visitor(this);
        card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, minimum_age);
        timings_.EndSplit();
      }
    }
  }
}

class RecursiveMarkTask : public MarkStackTask<false> {
 public:
  RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
                    accounting::SpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
      : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL),
        bitmap_(bitmap),
        begin_(begin),
        end_(end) {
  }

 protected:
  accounting::SpaceBitmap* const bitmap_;
  const uintptr_t begin_;
  const uintptr_t end_;

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    bitmap_->VisitMarkedRange(begin_, end_, visitor);
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  TimingLogger::ScopedSplit split("RecursiveMark", &timings_);
  // RecursiveMark will build the lists of known instances of the Reference classes. See
  // DelayReferenceReferent for details.
  if (kUseRecursiveMark) {
    const bool partial = GetGcType() == kGcTypePartial;
    ScanObjectVisitor scan_visitor(this);
    auto* self = Thread::Current();
    ThreadPool* thread_pool = heap_->GetThreadPool();
    size_t thread_count = GetThreadCount(false);
    const bool parallel = kParallelRecursiveMark && thread_count > 1;
    mark_stack_->Reset();
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_space_bitmap_ = space->GetMarkBitmap();
        if (current_space_bitmap_ == nullptr) {
          continue;
        }
        if (parallel) {
          // We will use the mark stack in the future.
          // CHECK(mark_stack_->IsEmpty());
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          atomic_finger_ = static_cast<int32_t>(0xFFFFFFFF);

          // Create a few worker tasks.
          const size_t n = thread_count * 2;
          while (begin != end) {
            uintptr_t start = begin;
            uintptr_t delta = (end - begin) / n;
            delta = RoundUp(delta, KB);
            if (delta < 16 * KB) delta = end - begin;
            begin += delta;
            auto* task = new RecursiveMarkTask(thread_pool, this, current_space_bitmap_, start,
                                               begin);
            thread_pool->AddTask(self, task);
          }
          thread_pool->SetMaxActiveWorkers(thread_count - 1);
          thread_pool->StartWorkers(self);
          thread_pool->Wait(self, true, true);
          thread_pool->StopWorkers(self);
        } else {
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          current_space_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
        }
      }
    }
  }
  ProcessMarkStack(false);
}

mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) {
  if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
    return object;
  }
  return nullptr;
}

void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
  ScanGrayObjects(paused, minimum_age);
  ProcessMarkStack(paused);
}

void MarkSweep::ReMarkRoots() {
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  timings_.StartSplit("(Paused)ReMarkRoots");
  Runtime::Current()->VisitRoots(
      MarkRootCallback, this, static_cast<VisitRootFlags>(kVisitRootFlagNewRoots |
                                                          kVisitRootFlagStopLoggingNewRoots |
                                                          kVisitRootFlagClearRootLog));
  timings_.EndSplit();
  if (kVerifyRoots) {
    timings_.StartSplit("(Paused)VerifyRoots");
    Runtime::Current()->VisitRoots(VerifyRootMarked, this);
    timings_.EndSplit();
  }
}

void MarkSweep::SweepSystemWeaks() {
  Runtime* runtime = Runtime::Current();
  timings_.StartSplit("SweepSystemWeaks");
  runtime->SweepSystemWeaks(IsMarkedCallback, this);
  timings_.EndSplit();
}

mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return "marked".
  return obj;
}

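// An object is considered live if it is in a continuous-space live bitmap, in the large-object
// live set, or still sitting on the allocation stack; anything else is a dead object that a
// system weak should no longer reference.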
void MarkSweep::VerifyIsLive(const Object* obj) {
  Heap* heap = GetHeap();
  if (!heap->GetLiveBitmap()->Test(obj)) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->GetLiveObjects()->Test(obj)) {
      if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
          heap->allocation_stack_->End()) {
        // Object not found!
        heap->DumpSpaces();
        LOG(FATAL) << "Found dead object " << obj;
      }
    }
  }
}

void MarkSweep::VerifySystemWeaks() {
  // Verify system weaks, uses a special object visitor which returns the input object.
  Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
}

class CheckpointMarkThreadRoots : public Closure {
 public:
  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
    ATRACE_BEGIN("Marking thread roots");
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    ATRACE_END();
    if (kUseThreadLocalAllocationStack) {
      thread->RevokeThreadLocalAllocationStack();
    }
    if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint) {
      mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
    }
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* mark_sweep_;
};

Ian Rogers1d54e732013-05-02 21:10:01 -07001044void MarkSweep::MarkRootsCheckpoint(Thread* self) {
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001045 CheckpointMarkThreadRoots check_point(this);
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001046 timings_.StartSplit("MarkRootsCheckpoint");
Mathieu Chartier858f1c52012-10-17 17:45:55 -07001047 ThreadList* thread_list = Runtime::Current()->GetThreadList();
Ian Rogers1d54e732013-05-02 21:10:01 -07001048 // Request that the checkpoint be run on all threads, returning a count of the threads that
1049 // must run through the barrier, including self.
1050 size_t barrier_count = thread_list->RunCheckpoint(&check_point);
1051 // Release locks then wait for all mutator threads to pass the barrier.
1052 // TODO: optimize to not release locks when there are no threads to wait for.
1053 Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
1054 Locks::mutator_lock_->SharedUnlock(self);
1055 ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun);
1056 CHECK_EQ(old_state, kWaitingPerformingGc);
1057 gc_barrier_->Increment(self, barrier_count);
1058 self->SetState(kWaitingPerformingGc);
1059 Locks::mutator_lock_->SharedLock(self);
1060 Locks::heap_bitmap_lock_->ExclusiveLock(self);
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001061 timings_.EndSplit();
Mathieu Chartier858f1c52012-10-17 17:45:55 -07001062}
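
// A minimal sketch of the counting-barrier idea behind the checkpoint above: RunCheckpoint
// reports how many threads will run the closure, each closure ends with Pass(), and the GC
// thread blocks until that many passes arrive. ModelBarrier is a hypothetical stand-in for
// the runtime's barrier, written against the standard library only.
#include <condition_variable>
#include <cstddef>
#include <mutex>

class ModelBarrier {
 public:
  // Called by each checkpointed thread once its roots are marked.
  void Pass() {
    std::lock_guard<std::mutex> lg(lock_);
    ++passed_;
    cv_.notify_all();
  }
  // Called by the GC thread; blocks until 'expected' threads have passed.
  void Increment(size_t expected) {
    std::unique_lock<std::mutex> ul(lock_);
    cv_.wait(ul, [&] { return passed_ >= expected; });
    passed_ -= expected;  // Reset for the next checkpoint round.
  }
 private:
  std::mutex lock_;
  std::condition_variable cv_;
  size_t passed_ = 0;
};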
1063
Ian Rogers1d54e732013-05-02 21:10:01 -07001064void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001065 timings_.StartSplit("SweepArray");
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001066 Thread* self = Thread::Current();
1067 mirror::Object* chunk_free_buffer[kSweepArrayChunkFreeSize];
1068 size_t chunk_free_pos = 0;
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001069 size_t freed_bytes = 0;
1070 size_t freed_large_object_bytes = 0;
Hiroshi Yamauchib22a4512013-08-13 15:03:22 -07001071 size_t freed_objects = 0;
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001072 size_t freed_large_objects = 0;
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001073 // How many objects are left in the array; this is updated after each space is swept.
Mathieu Chartierd8195f12012-10-05 12:21:28 -07001074 Object** objects = const_cast<Object**>(allocations->Begin());
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001075 size_t count = allocations->Size();
1076 // Change the order to ensure that the non-moving space is swept last as an optimization.
1077 std::vector<space::ContinuousSpace*> sweep_spaces;
1078 space::ContinuousSpace* non_moving_space = nullptr;
1079 for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
Mathieu Chartier8d562102014-03-12 17:42:10 -07001080 if (space->IsAllocSpace() && !immune_region_.ContainsSpace(space) &&
1081 space->GetLiveBitmap() != nullptr) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001082 if (space == heap_->GetNonMovingSpace()) {
1083 non_moving_space = space;
1084 } else {
1085 sweep_spaces.push_back(space);
1086 }
1087 }
1088 }
1089 // Unlikely to sweep a significant amount of non-movable objects, so we do these after
1090 // the other alloc spaces as an optimization.
1091 if (non_moving_space != nullptr) {
1092 sweep_spaces.push_back(non_moving_space);
1093 }
1094 // Start by sweeping the continuous spaces.
1095 for (space::ContinuousSpace* space : sweep_spaces) {
1096 space::AllocSpace* alloc_space = space->AsAllocSpace();
1097 accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
1098 accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
1099 if (swap_bitmaps) {
1100 std::swap(live_bitmap, mark_bitmap);
1101 }
1102 Object** out = objects;
1103 for (size_t i = 0; i < count; ++i) {
1104 Object* obj = objects[i];
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08001105 if (kUseThreadLocalAllocationStack && obj == nullptr) {
1106 continue;
1107 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001108 if (space->HasAddress(obj)) {
1109 // This object is in the space; remove it from the array and add it to the sweep buffer
1110 // if needed.
1111 if (!mark_bitmap->Test(obj)) {
1112 if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
1113 timings_.StartSplit("FreeList");
1114 freed_objects += chunk_free_pos;
1115 freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
1116 timings_.EndSplit();
1117 chunk_free_pos = 0;
1118 }
1119 chunk_free_buffer[chunk_free_pos++] = obj;
1120 }
1121 } else {
1122 *(out++) = obj;
1123 }
1124 }
1125 if (chunk_free_pos > 0) {
1126 timings_.StartSplit("FreeList");
1127 freed_objects += chunk_free_pos;
1128 freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
1129 timings_.EndSplit();
1130 chunk_free_pos = 0;
1131 }
1132 // All of the references which the space contained are no longer in the allocation stack;
1133 // update the count.
1134 count = out - objects;
1135 }
1136 // Handle the large object space.
1137 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
Mathieu Chartierdb7f37d2014-01-10 11:09:06 -08001138 accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects();
1139 accounting::ObjectSet* large_mark_objects = large_object_space->GetMarkObjects();
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001140 if (swap_bitmaps) {
1141 std::swap(large_live_objects, large_mark_objects);
1142 }
Brian Carlstrom02c8cc62013-07-18 15:54:44 -07001143 for (size_t i = 0; i < count; ++i) {
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001144 Object* obj = objects[i];
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001145 // Handle large objects.
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08001146 if (kUseThreadLocalAllocationStack && obj == nullptr) {
1147 continue;
1148 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001149 if (!large_mark_objects->Test(obj)) {
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001150 ++freed_large_objects;
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001151 freed_large_object_bytes += large_object_space->Free(self, obj);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001152 }
1153 }
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001154 timings_.EndSplit();
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001155
Hiroshi Yamauchib22a4512013-08-13 15:03:22 -07001156 timings_.StartSplit("RecordFree");
Mathieu Chartier40e978b2012-09-07 11:38:36 -07001157 VLOG(heap) << "Freed " << freed_objects << "/" << count
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001158 << " objects with size " << PrettySize(freed_bytes);
Mathieu Chartiere53225c2013-08-19 10:59:11 -07001159 heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes + freed_large_object_bytes);
Ian Rogersb122a4b2013-11-19 18:00:50 -08001160 freed_objects_.FetchAndAdd(freed_objects);
1161 freed_large_objects_.FetchAndAdd(freed_large_objects);
1162 freed_bytes_.FetchAndAdd(freed_bytes);
1163 freed_large_object_bytes_.FetchAndAdd(freed_large_object_bytes);
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001164 timings_.EndSplit();
Ian Rogers1d54e732013-05-02 21:10:01 -07001165
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001166 timings_.StartSplit("ResetStack");
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001167 allocations->Reset();
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001168 timings_.EndSplit();
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001169}
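
// A minimal sketch of the chunked-free pattern used above: dead objects are batched into a
// fixed-size buffer so the allocator's bulk-free entry point runs once per chunk rather
// than once per object. ModelBatchedFree and free_many are hypothetical stand-ins for
// SweepArray's buffer and alloc_space->FreeList.
#include <cstddef>

template <typename T, size_t kChunkSize, typename FreeManyFn>
void ModelBatchedFree(T* const* dead, size_t count, FreeManyFn free_many) {
  T* chunk[kChunkSize];
  size_t pos = 0;
  for (size_t i = 0; i < count; ++i) {
    chunk[pos++] = dead[i];
    if (pos == kChunkSize) {
      free_many(chunk, pos);  // Buffer full: release one whole batch.
      pos = 0;
    }
  }
  if (pos > 0) {
    free_many(chunk, pos);  // Release the final partial batch.
  }
}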
1170
Ian Rogers1d54e732013-05-02 21:10:01 -07001171void MarkSweep::Sweep(bool swap_bitmaps) {
Mathieu Chartierb43b7d42012-06-19 13:15:09 -07001172 DCHECK(mark_stack_->IsEmpty());
Ian Rogers5fe9af72013-11-14 00:17:20 -08001173 TimingLogger::ScopedSplit split("Sweep", &timings_);  // Named so the RAII split covers the whole function.
Mathieu Chartier02e25112013-08-14 16:14:24 -07001174 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
Mathieu Chartiera1602f22014-01-13 17:19:19 -08001175 if (space->IsContinuousMemMapAllocSpace()) {
1176 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
Mathieu Chartierec050072014-01-07 16:00:07 -08001177 TimingLogger::ScopedSplit split(
Mathieu Chartiera1602f22014-01-13 17:19:19 -08001178 alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", &timings_);
Mathieu Chartierec050072014-01-07 16:00:07 -08001179 size_t freed_objects = 0;
1180 size_t freed_bytes = 0;
Mathieu Chartiera1602f22014-01-13 17:19:19 -08001181 alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
Mathieu Chartierec050072014-01-07 16:00:07 -08001182 heap_->RecordFree(freed_objects, freed_bytes);
1183 freed_objects_.FetchAndAdd(freed_objects);
1184 freed_bytes_.FetchAndAdd(freed_bytes);
Carl Shapiro58551df2011-07-24 03:09:51 -07001185 }
1186 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001187 SweepLargeObjects(swap_bitmaps);
Carl Shapiro58551df2011-07-24 03:09:51 -07001188}
1189
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001190void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
Ian Rogers5fe9af72013-11-14 00:17:20 -08001191 TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);  // Named so the split times the sweep below.
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001192 size_t freed_objects = 0;
Mathieu Chartier2fde5332012-09-14 14:51:54 -07001193 size_t freed_bytes = 0;
Mathieu Chartierdb7f37d2014-01-10 11:09:06 -08001194 GetHeap()->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
Ian Rogersb122a4b2013-11-19 18:00:50 -08001195 freed_large_objects_.FetchAndAdd(freed_objects);
1196 freed_large_object_bytes_.FetchAndAdd(freed_bytes);
Mathieu Chartier2fde5332012-09-14 14:51:54 -07001197 GetHeap()->RecordFree(freed_objects, freed_bytes);
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001198}
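
// A minimal sketch of what a bitmap sweep computes: the objects present in the live set but
// absent from the mark set are dead, and swap_bitmaps merely exchanges the two roles.
// std::set stands in for the bitmaps here; ModelDeadObjects is hypothetical.
#include <set>
#include <vector>

template <typename T>
std::vector<T*> ModelDeadObjects(const std::set<T*>& live, const std::set<T*>& marked) {
  std::vector<T*> dead;
  for (T* obj : live) {
    if (marked.count(obj) == 0) {
      dead.push_back(obj);  // Allocated before this GC but never reached during marking.
    }
  }
  return dead;
}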
1199
Carl Shapiro69759ea2011-07-21 18:13:35 -07001200// Process the "referent" field in a java.lang.ref.Reference. If the
1201// referent has not yet been marked, put it on the appropriate list in
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001202// the heap for later processing.
1203void MarkSweep::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
1204 DCHECK(klass != nullptr);
Mathieu Chartier0e54cd02014-03-20 12:41:23 -07001205 if (kCountJavaLangRefs) {
1206 ++reference_count_;
1207 }
Mathieu Chartier8fa2dad2014-03-13 12:22:56 -07001208 heap_->DelayReferenceReferent(klass, obj->AsReference(), IsMarkedCallback, this);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001209}
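
// A minimal sketch of reference delaying as used above: test the referent with the IsMarked
// callback and, if it is not (yet) marked, queue the Reference so the heap can decide after
// marking whether to clear or enqueue it. ModelReference/ModelReferent are hypothetical
// stand-ins for the mirror types.
#include <vector>

struct ModelReferent { bool marked = false; };
struct ModelReference { ModelReferent* referent = nullptr; };

inline void ModelDelayReference(ModelReference* ref, std::vector<ModelReference*>* pending) {
  if (ref->referent != nullptr && !ref->referent->marked) {
    pending->push_back(ref);  // Revisit once marking has reached a fixed point.
  }
}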
1210
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001211class MarkObjectVisitor {
1212 public:
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001213 explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {}
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001214
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001215 // TODO: Fix when annotalysis works with visitors.
Mathieu Chartier0e54cd02014-03-20 12:41:23 -07001216 void operator()(Object* /* obj */, Object* ref, const MemberOffset& /* offset */,
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001217 bool /* is_static */) const ALWAYS_INLINE
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001218 NO_THREAD_SAFETY_ANALYSIS {
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001219 if (kCheckLocks) {
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001220 Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
1221 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
1222 }
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001223 mark_sweep_->MarkObject(ref);
1224 }
1225
1226 private:
1227 MarkSweep* const mark_sweep_;
1228};
1229
Carl Shapiro69759ea2011-07-21 18:13:35 -07001230// Scans an object. Determines the type of the object
1231// and dispatches to a specialized scanning routine.
Mathieu Chartier590fee92013-09-13 13:46:47 -07001232void MarkSweep::ScanObject(Object* obj) {
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001233 MarkObjectVisitor visitor(this);
Mathieu Chartier590fee92013-09-13 13:46:47 -07001234 ScanObjectVisit(obj, visitor);
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001235}
1236
Mathieu Chartier3bb57c72014-02-18 11:38:45 -08001237void MarkSweep::ProcessMarkStackPausedCallback(void* arg) {
Mathieu Chartier3bb57c72014-02-18 11:38:45 -08001238 reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(true);
1239}
1240
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001241void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001242 Thread* self = Thread::Current();
1243 ThreadPool* thread_pool = GetHeap()->GetThreadPool();
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001244 const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
1245 static_cast<size_t>(MarkStackTask<false>::kMaxSize));
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001246 CHECK_GT(chunk_size, 0U);
1247 // Split the current mark stack up into work tasks.
1248 for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) {
1249 const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
Mathieu Chartier0e54cd02014-03-20 12:41:23 -07001250 thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it));
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001251 it += delta;
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001252 }
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001253 thread_pool->SetMaxActiveWorkers(thread_count - 1);
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001254 thread_pool->StartWorkers(self);
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001255 thread_pool->Wait(self, true, true);
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001256 thread_pool->StopWorkers(self);
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001257 mark_stack_->Reset();
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001258 CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
Carl Shapiro69759ea2011-07-21 18:13:35 -07001259}
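
// A minimal sketch of the chunking arithmetic above: the mark stack is cut into contiguous
// [it, it + delta) slices, one task per slice, sized so every worker gets work without any
// task exceeding the maximum chunk size. ModelSliceIntoTasks and make_task are hypothetical
// stand-ins for the loop above and MarkStackTask construction.
#include <algorithm>
#include <cstddef>

template <typename T, typename MakeTaskFn>
void ModelSliceIntoTasks(T** begin, T** end, size_t thread_count, size_t max_chunk,
                         MakeTaskFn make_task) {
  // Pre: thread_count >= 1.
  const size_t total = static_cast<size_t>(end - begin);
  const size_t chunk_size = std::min(total / thread_count + 1, max_chunk);  // Always >= 1.
  for (T** it = begin; it < end;) {
    const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
    make_task(it, delta);  // One work unit covering [it, it + delta).
    it += delta;
  }
}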
1260
Ian Rogers5d76c432011-10-31 21:42:49 -07001261// Scan anything that's on the mark stack.
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001262void MarkSweep::ProcessMarkStack(bool paused) {
Mathieu Chartier3bb57c72014-02-18 11:38:45 -08001263 timings_.StartSplit(paused ? "(Paused)ProcessMarkStack" : "ProcessMarkStack");
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001264 size_t thread_count = GetThreadCount(paused);
1265 if (kParallelProcessMarkStack && thread_count > 1 &&
1266 mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
1267 ProcessMarkStackParallel(thread_count);
Mathieu Chartierd8195f12012-10-05 12:21:28 -07001268 } else {
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001269 // TODO: Tune this.
1270 static const size_t kFifoSize = 4;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001271 BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001272 for (;;) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07001273 Object* obj = nullptr;
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001274 if (kUseMarkStackPrefetch) {
1275 while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07001276 Object* mark_obj = mark_stack_->PopBack();
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001277 DCHECK(mark_obj != nullptr);
1278 __builtin_prefetch(mark_obj);
1279 prefetch_fifo.push_back(mark_obj);
1280 }
1281 if (prefetch_fifo.empty()) {
1282 break;
1283 }
1284 obj = prefetch_fifo.front();
1285 prefetch_fifo.pop_front();
1286 } else {
1287 if (mark_stack_->IsEmpty()) {
1288 break;
1289 }
1290 obj = mark_stack_->PopBack();
1291 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001292 DCHECK(obj != nullptr);
Mathieu Chartierd8195f12012-10-05 12:21:28 -07001293 ScanObject(obj);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001294 }
1295 }
Anwar Ghuloum4446ab92013-08-09 21:17:25 -07001296 timings_.EndSplit();
Carl Shapiro69759ea2011-07-21 18:13:35 -07001297}
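
// A minimal sketch of the prefetch FIFO above: a small queue runs ahead of the scan so
// __builtin_prefetch has a few iterations of latency to pull each object into cache before
// it is visited. std::vector/std::deque stand in for the mark stack and the bounded FIFO;
// ModelDrainWithPrefetch is hypothetical.
#include <cstddef>
#include <deque>
#include <vector>

template <typename T, typename ScanFn>
void ModelDrainWithPrefetch(std::vector<T*>* stack, size_t fifo_size, ScanFn scan) {
  std::deque<T*> fifo;
  for (;;) {
    while (!stack->empty() && fifo.size() < fifo_size) {
      T* obj = stack->back();
      stack->pop_back();
      __builtin_prefetch(obj);  // Hint: this object will be scanned a few iterations later.
      fifo.push_back(obj);
    }
    if (fifo.empty()) {
      break;  // Neither pending prefetches nor stacked work remain.
    }
    T* obj = fifo.front();
    fifo.pop_front();
    scan(obj);  // Scanning may push newly marked objects back onto 'stack'.
  }
}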
1298
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001299inline bool MarkSweep::IsMarked(const Object* object) const
1300 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
Mathieu Chartier8d562102014-03-12 17:42:10 -07001301 if (immune_region_.ContainsObject(object)) {
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001302 return true;
1303 }
Mathieu Chartier0e54cd02014-03-20 12:41:23 -07001304 if (current_space_bitmap_->HasAddress(object)) {
1305 return current_space_bitmap_->Test(object);
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001306 }
Mathieu Chartier0e54cd02014-03-20 12:41:23 -07001307 return mark_bitmap_->Test(object);
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001308}
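
// A minimal sketch of what a space bitmap Test() amounts to: map an aligned address to a
// (word, bit) pair, then load and mask. The 8-byte slot granularity and 64-bit words are
// assumptions for illustration; the real constants live in the accounting code.
#include <cstdint>

inline bool ModelBitmapTest(const uint64_t* words, uintptr_t heap_begin, const void* obj) {
  const uintptr_t offset = reinterpret_cast<uintptr_t>(obj) - heap_begin;
  const uintptr_t bit_index = offset / 8;       // Assumed 8-byte object alignment.
  const uint64_t word = words[bit_index / 64];  // 64 slots tracked per bitmap word.
  return (word >> (bit_index % 64)) & 1;
}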
1309
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001310void MarkSweep::FinishPhase() {
Ian Rogers5fe9af72013-11-14 00:17:20 -08001311 TimingLogger::ScopedSplit split("FinishPhase", &timings_);
Anwar Ghuloum46543222013-08-12 09:28:42 -07001312 // Can't enqueue references if we hold the mutator lock.
Ian Rogers1d54e732013-05-02 21:10:01 -07001313 Heap* heap = GetHeap();
Anwar Ghuloum46543222013-08-12 09:28:42 -07001314 timings_.NewSplit("PostGcVerification");
Ian Rogers1d54e732013-05-02 21:10:01 -07001315 heap->PostGcVerification(this);
Mathieu Chartier0e54cd02014-03-20 12:41:23 -07001316 // Update the cumulative statistics.
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001317 total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
1318 total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001319 // Ensure that the mark stack is empty.
1320 CHECK(mark_stack_->IsEmpty());
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001321 if (kCountScannedTypes) {
1322 VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
1323 << " other=" << other_count_;
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001324 }
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001325 if (kCountTasks) {
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001326 VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001327 }
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001328 if (kMeasureOverhead) {
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001329 VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001330 }
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001331 if (kProfileLargeObjects) {
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001332 VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001333 }
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001334 if (kCountJavaLangRefs) {
1335 VLOG(gc) << "References scanned " << reference_count_;
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001336 }
Mathieu Chartier0e54cd02014-03-20 12:41:23 -07001337 if (kCountMarkedObjects) {
1338 VLOG(gc) << "Marked: null=" << mark_null_count_ << " immune=" << mark_immune_count_
1339 << " fastpath=" << mark_fastpath_count_ << " slowpath=" << mark_slowpath_count_;
1340 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001341 // Update the cumulative loggers.
1342 cumulative_timings_.Start();
Anwar Ghuloum6f28d912013-07-24 15:02:53 -07001343 cumulative_timings_.AddLogger(timings_);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001344 cumulative_timings_.End();
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001345 // Clear all of the spaces' mark bitmaps.
Mathieu Chartier02e25112013-08-14 16:14:24 -07001346 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07001347 accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
1348 if (bitmap != nullptr &&
1349 space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
1350 bitmap->Clear();
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001351 }
1352 }
Mathieu Chartier5301cd22012-05-31 12:11:36 -07001353 mark_stack_->Reset();
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001354 // Reset the marked large objects.
Ian Rogers1d54e732013-05-02 21:10:01 -07001355 space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001356 large_objects->GetMarkObjects()->Clear();
Carl Shapiro69759ea2011-07-21 18:13:35 -07001357}
1358
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001359void MarkSweep::RevokeAllThreadLocalBuffers() {
1360 if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) {
1361 // If concurrent, rosalloc thread-local buffers are revoked at the
1362 // thread checkpoint. Bump pointer space thread-local buffers must
1363 // not be in use.
1364 GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
1365 } else {
1366 timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
1367 GetHeap()->RevokeAllThreadLocalBuffers();
1368 timings_.EndSplit();
1369 }
1370}
1371
Ian Rogers1d54e732013-05-02 21:10:01 -07001372} // namespace collector
1373} // namespace gc
Carl Shapiro69759ea2011-07-21 18:13:35 -07001374} // namespace art