/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"

using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

// Performance options.
static constexpr bool kUseRecursiveMark = false;
static constexpr bool kUseMarkStackPrefetch = true;
static constexpr size_t kSweepArrayChunkFreeSize = 1024;
static constexpr bool kPreCleanCards = true;

// Parallelism options.
static constexpr bool kParallelCardScan = true;
static constexpr bool kParallelRecursiveMark = true;
// Don't attempt to parallelize mark stack processing unless the mark stack has at least
// kMinimumParallelMarkStackSize elements. This is temporary until we reduce the overhead caused
// by allocating tasks, etc. Without this, ProcessReferences can incur extra overhead since we may
// end up making many calls to ProcessMarkStack with very small mark stacks.
static constexpr size_t kMinimumParallelMarkStackSize = 128;
static constexpr bool kParallelProcessMarkStack = true;

// Profiling and information flags.
static constexpr bool kProfileLargeObjects = false;
static constexpr bool kMeasureOverhead = false;
static constexpr bool kCountTasks = false;
static constexpr bool kCountJavaLangRefs = false;
static constexpr bool kCountMarkedObjects = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
static constexpr bool kCheckLocks = kDebugLocking;
static constexpr bool kVerifyRootsMarked = kIsDebugBuild;

// If true, revoke the rosalloc thread-local buffers at the
// checkpoint, as opposed to during the pause.
static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true;

void MarkSweep::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
    }
  }
  timings_.EndSplit();
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix +
                       (is_concurrent ? "concurrent mark sweep" : "mark sweep")),
      current_space_bitmap_(nullptr), mark_bitmap_(nullptr), mark_stack_(nullptr),
      gc_barrier_(new Barrier(0)),
      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
      is_concurrent_(is_concurrent), live_stack_freeze_size_(0) {
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous(
      "mark sweep sweep array free buffer", nullptr,
      RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
      PROT_READ | PROT_WRITE, false, &error_msg);
  CHECK(mem_map != nullptr) << "Couldn't allocate sweep array free buffer: " << error_msg;
  sweep_array_free_buffer_mem_map_.reset(mem_map);
}

void MarkSweep::InitializePhase() {
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->GetMarkStack();
  DCHECK(mark_stack_ != nullptr);
  immune_region_.Reset();
  class_count_.StoreRelaxed(0);
  array_count_.StoreRelaxed(0);
  other_count_.StoreRelaxed(0);
  large_object_test_.StoreRelaxed(0);
  large_object_mark_.StoreRelaxed(0);
  overhead_time_.StoreRelaxed(0);
  work_chunks_created_.StoreRelaxed(0);
  work_chunks_deleted_.StoreRelaxed(0);
  reference_count_.StoreRelaxed(0);
  mark_null_count_.StoreRelaxed(0);
  mark_immune_count_.StoreRelaxed(0);
  mark_fastpath_count_.StoreRelaxed(0);
  mark_slowpath_count_.StoreRelaxed(0);
  {
    // TODO: I don't think we should need the heap bitmap lock to get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }
  if (!clear_soft_references_) {
    // Always clear soft references for non-sticky collections.
    clear_soft_references_ = GetGcType() != collector::kGcTypeSticky;
  }
}

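// An overview of the collection flow implemented by RunPhases() below: when the collector is
// concurrent, marking runs under a shared mutator lock, followed by a short pause that re-marks
// roots and re-scans dirty cards; sweeping is always performed concurrently afterwards (see the
// comment inside RunPhases). Verification hooks bracket each phase.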
void MarkSweep::RunPhases() {
  Thread* self = Thread::Current();
  InitializePhase();
  Locks::mutator_lock_->AssertNotHeld(self);
  if (IsConcurrent()) {
    GetHeap()->PreGcVerification(this);
    {
      ReaderMutexLock mu(self, *Locks::mutator_lock_);
      MarkingPhase();
    }
    ScopedPause pause(this);
    GetHeap()->PrePauseRosAllocVerification(this);
    PausePhase();
    RevokeAllThreadLocalBuffers();
  } else {
    ScopedPause pause(this);
    GetHeap()->PreGcVerificationPaused(this);
    MarkingPhase();
    GetHeap()->PrePauseRosAllocVerification(this);
    PausePhase();
    RevokeAllThreadLocalBuffers();
  }
  {
    // Sweeping is always done concurrently, even for non-concurrent mark sweep.
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  GetHeap()->PostGcVerification(this);
  FinishPhase();
}

void MarkSweep::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      true, &timings_, clear_soft_references_, &HeapReferenceMarkedCallback, &MarkObjectCallback,
      &ProcessMarkStackCallback, this);
}

void MarkSweep::PausePhase() {
  TimingLogger::ScopedSplit split("(Paused)PausePhase", &timings_);
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  if (IsConcurrent()) {
    // Handle the dirty objects if we are a concurrent GC.
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Re-mark root set.
    ReMarkRoots();
    // Scan dirty objects; this re-scans cards dirtied by mutators while marking ran concurrently.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }
  {
    TimingLogger::ScopedSplit split("SwapStacks", &timings_);
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->SwapStacks(self);
    live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
    // Need to revoke all the thread-local allocation stacks since we just swapped the allocation
    // stacks and don't want anybody to allocate into the live stack.
    RevokeAllThreadLocalAllocationStacks(self);
  }
  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();
  // Disallow new system weaks to prevent a race which occurs when someone adds a new system
  // weak before we sweep them. Since this new system weak may not be marked, the GC may
  // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
  // reference to a string that is about to be swept.
  Runtime::Current()->DisallowNewSystemWeaks();
  // Enable the reference processing slow path; this needs to be done with mutators paused since
  // there is no lock in the GetReferent fast path.
  GetHeap()->GetReferenceProcessor()->EnableSlowPath();
}

void MarkSweep::PreCleanCards() {
  // Don't do this for non-concurrent GCs since they don't have any dirty cards.
  if (kPreCleanCards && IsConcurrent()) {
    Thread* self = Thread::Current();
    CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
    // Process dirty cards and add dirty cards to mod union tables; this also ages cards.
    heap_->ProcessCards(timings_, false);
    // The checkpoint root marking is required to avoid a race condition which occurs if the
    // following happens during a reference write:
    // 1. mutator dirties the card (write barrier)
    // 2. GC ages the card (the above ProcessCards call)
    // 3. GC scans the object (the RecursiveMarkDirtyObjects call below)
    // 4. mutator writes the value (corresponding to the write barrier in 1.)
    // This causes the GC to age the card but not necessarily mark the reference which the mutator
    // wrote into the object stored in the card.
    // Having the checkpoint fixes this issue since it ensures that the card mark and the
    // reference write are visible to the GC before the card is scanned (this is due to locks being
    // acquired / released in the checkpoint code).
    // The other roots are also marked to help reduce the pause.
    MarkRootsCheckpoint(self, false);
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
    // Process the newly aged cards.
    RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
    // TODO: Empty allocation stack to reduce the number of objects we need to test / mark as live
    // in the next GC.
  }
}

void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
  if (kUseThreadLocalAllocationStack) {
    timings_.NewSplit("RevokeAllThreadLocalAllocationStacks");
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    heap_->RevokeAllThreadLocalAllocationStacks(self);
  }
}

void MarkSweep::MarkingPhase() {
  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  Thread* self = Thread::Current();

  BindBitmaps();
  FindDefaultSpaceBitmap();

  // Process dirty cards and add dirty cards to mod union tables.
  heap_->ProcessCards(timings_, false);

  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  MarkRoots(self);
  MarkReachableObjects();
  // Pre-clean dirtied cards to reduce pauses.
  PreCleanCards();
}

void MarkSweep::UpdateAndMarkModUnion() {
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
          "UpdateAndMarkImageModUnionTable";
      TimingLogger::ScopedSplit split(name, &timings_);
      accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
      CHECK(mod_union_table != nullptr);
      mod_union_table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
    }
  }
}

void MarkSweep::MarkReachableObjects() {
  UpdateAndMarkModUnion();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  Thread* self = Thread::Current();
  // Process the references concurrently.
  ProcessReferences(self);
  SweepSystemWeaks(self);
  Runtime::Current()->AllowNewSystemWeaks();
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Reclaim unmarked objects.
    Sweep(false);

    // Swap the live and mark bitmaps for each space that we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();

    // Unbind the live and mark bitmaps.
    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
    GetHeap()->UnBindBitmaps();
  }
}

void MarkSweep::FindDefaultSpaceBitmap() {
  TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::ContinuousSpaceBitmap* bitmap = space->GetMarkBitmap();
    // We want to have the main space instead of the non-moving space if possible.
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_space_bitmap_ = bitmap;
      // If we are not the non-moving space, exit the loop early since this will be good enough.
      if (space != heap_->GetNonMovingSpace()) {
        break;
      }
    }
  }
  if (current_space_bitmap_ == nullptr) {
    heap_->DumpSpaces();
    LOG(FATAL) << "Could not find a default mark bitmap";
  }
}

void MarkSweep::ExpandMarkStack() {
  ResizeMarkStack(mark_stack_->Capacity() * 2);
}

void MarkSweep::ResizeMarkStack(size_t new_size) {
  // Rare case, no need to have Thread::Current be a parameter.
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

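// Parallel marking entry point: worker threads mark an object with an atomic test-and-set
// (MarkObjectParallel below) and, if it was newly marked, push it onto the shared mark stack
// under mark_stack_lock_. Expanding the stack is rare and handled under the same lock (see
// ResizeMarkStack above).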
inline void MarkSweep::MarkObjectNonNullParallel(Object* obj) {
  DCHECK(obj != nullptr);
  if (MarkObjectParallel(obj)) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(obj);
  }
}

mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* obj, void* arg) {
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObject(obj);
  return obj;
}

void MarkSweep::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObject(ref->AsMirrorPtr());
}

bool MarkSweep::HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) {
  return reinterpret_cast<MarkSweep*>(arg)->IsMarked(ref->AsMirrorPtr());
}

class MarkSweepMarkObjectSlowPath {
 public:
  explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {
  }

  void operator()(const Object* obj) const ALWAYS_INLINE {
    if (kProfileLargeObjects) {
      // TODO: Differentiate between marking and testing somehow.
      ++mark_sweep_->large_object_test_;
      ++mark_sweep_->large_object_mark_;
    }
    space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace();
    if (UNLIKELY(obj == nullptr || !IsAligned<kPageSize>(obj) ||
                 (kIsDebugBuild && !large_object_space->Contains(obj)))) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      mark_sweep_->VerifyRoots();
      LOG(FATAL) << "Can't mark invalid object";
    }
  }

 private:
  MarkSweep* const mark_sweep_;
};

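// Serial marking fast/slow paths: immune objects are expected to already be marked, objects in
// the current space bitmap are marked with a simple bitmap set, and everything else (e.g. large
// objects) falls back to the heap-wide mark bitmap, with the slow-path visitor above validating
// the address.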
inline void MarkSweep::MarkObjectNonNull(Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    // Verify all the objects have the correct pointer installed.
    obj->AssertReadBarrierPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    if (kCountMarkedObjects) {
      ++mark_immune_count_;
    }
    DCHECK(mark_bitmap_->Test(obj));
  } else if (LIKELY(current_space_bitmap_->HasAddress(obj))) {
    if (kCountMarkedObjects) {
      ++mark_fastpath_count_;
    }
    if (UNLIKELY(!current_space_bitmap_->Set(obj))) {
      PushOnMarkStack(obj);  // This object was not previously marked.
    }
  } else {
    if (kCountMarkedObjects) {
      ++mark_slowpath_count_;
    }
    MarkSweepMarkObjectSlowPath visitor(this);
    // TODO: We already know that the object is not in the current_space_bitmap_ but
    // MarkBitmap::Set will check again.
    if (!mark_bitmap_->Set(obj, visitor)) {
      PushOnMarkStack(obj);  // Was not already marked, push.
    }
  }
}

inline void MarkSweep::PushOnMarkStack(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    // Lock is not needed but is here anyway to please annotalysis.
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    ExpandMarkStack();
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    // Verify all the objects have the correct pointer installed.
    obj->AssertReadBarrierPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    DCHECK(IsMarked(obj));
    return false;
  }
  // Try to take advantage of locality of references within a space; failing this, find the space
  // the hard way.
  accounting::ContinuousSpaceBitmap* object_bitmap = current_space_bitmap_;
  if (LIKELY(object_bitmap->HasAddress(obj))) {
    return !object_bitmap->AtomicTestAndSet(obj);
  }
  MarkSweepMarkObjectSlowPath visitor(this);
  return !mark_bitmap_->AtomicTestAndSet(obj, visitor);
}

// Used to mark objects when processing the mark stack. If an object is null, it is not marked.
inline void MarkSweep::MarkObject(Object* obj) {
  if (obj != nullptr) {
    MarkObjectNonNull(obj);
  } else if (kCountMarkedObjects) {
    ++mark_null_count_;
  }
}

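// Root visiting callbacks: the parallel variant feeds MarkObjectNonNullParallel, the serial
// variant feeds MarkObjectNonNull, and the verify variants are used (gated by kVerifyRootsMarked
// and the root verification path) to check that roots were marked and are valid.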
void MarkSweep::MarkRootParallelCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                         RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(*root);
}

void MarkSweep::VerifyRootMarked(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  CHECK(reinterpret_cast<MarkSweep*>(arg)->IsMarked(*root));
}

void MarkSweep::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(*root);
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor, RootType root_type) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor, root_type);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor,
                           RootType root_type) {
  // See if the root is on any space bitmap.
  if (heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root << " with type " << root_type;
      if (visitor != NULL) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

void MarkSweep::MarkRoots(Thread* self) {
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    timings_.StartSplit("MarkRoots");
    Runtime::Current()->VisitRoots(MarkRootCallback, this);
    timings_.EndSplit();
    RevokeAllThreadLocalAllocationStacks(self);
  } else {
    MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint);
    // At this point the live stack should no longer have any mutators which push into it.
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
  }
}

void MarkSweep::MarkNonThreadRoots() {
  timings_.StartSplit("MarkNonThreadRoots");
  Runtime::Current()->VisitNonThreadRoots(MarkRootCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
  timings_.StartSplit("MarkConcurrentRoots");
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, flags);
  timings_.EndSplit();
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
      : mark_sweep_(mark_sweep) {}

  void operator()(Object* obj) const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

class DelayReferenceReferentVisitor {
 public:
  explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  MarkSweep* const collector_;
};

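// MarkStackTask is the unit of parallel marking work: each task owns a fixed-size local mark
// stack (kMaxSize entries) and, on overflow, hands half of its stack to the thread pool as a new
// task (see MarkStackPush below), which gives a simple form of load balancing across workers.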
template <bool kUseFinger = false>
class MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
                Object** mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != NULL);
      // TODO: Check performance?
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  static const size_t kMaxSize = 1 * KB;

 protected:
  class MarkObjectParallelVisitor {
   public:
    explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
                                       MarkSweep* mark_sweep) ALWAYS_INLINE
        : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}

    void operator()(Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
      if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
        if (kUseFinger) {
          android_memory_barrier();
          if (reinterpret_cast<uintptr_t>(ref) >=
              static_cast<uintptr_t>(mark_sweep_->atomic_finger_.LoadRelaxed())) {
            return;
          }
        }
        chunk_task_->MarkStackPush(ref);
      }
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
    MarkSweep* const mark_sweep_;
  };

  class ScanObjectParallelVisitor {
   public:
    explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
        : chunk_task_(chunk_task) {}

    // No thread safety analysis since multiple threads will use this visitor.
    void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
      MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
      MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
      DelayReferenceReferentVisitor ref_visitor(mark_sweep);
      mark_sweep->ScanObjectVisit(obj, mark_visitor, ref_visitor);
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread local mark stack for this task.
  Object* mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

  void MarkStackPush(Object* obj) ALWAYS_INLINE {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK_LT(mark_stack_pos_, kMaxSize);
    mark_stack_[mark_stack_pos_++] = obj;
  }

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          Object* obj = mark_stack_[--mark_stack_pos_];
          DCHECK(obj != nullptr);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_];
      }
      DCHECK(obj != nullptr);
      visitor(obj);
    }
  }
};

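// CardScanTask scans one address range of a space's card table for cards at or above
// minimum_age, marking the objects it finds, then drains the local mark stack it inherited
// from MarkStackTask.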
class CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
               accounting::ContinuousSpaceBitmap* bitmap,
               byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
               Object** mark_stack_obj)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age) {
  }

 protected:
  accounting::ContinuousSpaceBitmap* const bitmap_;
  byte* const begin_;
  byte* const end_;
  const byte minimum_age_;

  virtual void Finalize() {
    delete this;
  }

  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    size_t cards_scanned = card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_);
    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
               << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

size_t MarkSweep::GetThreadCount(bool paused) const {
  if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
    return 1;
  }
  if (paused) {
    return heap_->GetParallelGCThreadCount() + 1;
  } else {
    return heap_->GetConcGCThreadCount() + 1;
  }
}

void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  size_t thread_count = GetThreadCount(paused);
  // The parallel version with only one thread is faster for card scanning, TODO: fix.
  if (kParallelCardScan && thread_count > 1) {
    Thread* self = Thread::Current();
    // Can't have a different split for each space since multiple spaces can have their cards being
    // scanned at the same time.
    timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects");
    // Try to take some of the mark stack since we can pass this off to the worker tasks.
    Object** mark_stack_begin = mark_stack_->Begin();
    Object** mark_stack_end = mark_stack_->End();
    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
    // Estimated number of work tasks we will create.
    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
    DCHECK_NE(mark_stack_tasks, 0U);
    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                             mark_stack_size / mark_stack_tasks + 1);
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() == nullptr) {
        continue;
      }
      byte* card_begin = space->Begin();
      byte* card_end = space->End();
      // Align up the end address. For example, the image space's end
      // may not be card-size-aligned.
      card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin));
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end));
      // Calculate how many bytes of heap we will scan.
      const size_t address_range = card_end - card_begin;
      // Calculate how much address range each task gets.
      const size_t card_delta = RoundUp(address_range / thread_count + 1,
                                        accounting::CardTable::kCardSize);
      // Create the worker tasks for this space.
      while (card_begin != card_end) {
        // Add a range of cards.
        size_t addr_remaining = card_end - card_begin;
        size_t card_increment = std::min(card_delta, addr_remaining);
        // Take from the back of the mark stack.
        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
        mark_stack_end -= mark_stack_increment;
        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
        DCHECK_EQ(mark_stack_end, mark_stack_->End());
        // Add the new task to the thread pool.
        auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
                                      card_begin + card_increment, minimum_age,
                                      mark_stack_increment, mark_stack_end);
        thread_pool->AddTask(self, task);
        card_begin += card_increment;
      }
    }

    // Note: the card scan below may dirty new cards (and scan them)
    // as a side effect when a Reference object is encountered and
    // queued during the marking. See b/11465268.
    thread_pool->SetMaxActiveWorkers(thread_count - 1);
    thread_pool->StartWorkers(self);
    thread_pool->Wait(self, true, true);
    thread_pool->StopWorkers(self);
    timings_.EndSplit();
  } else {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() != nullptr) {
        // Image spaces are handled properly since live == marked for them.
        switch (space->GetGcRetentionPolicy()) {
          case space::kGcRetentionPolicyNeverCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
                "ScanGrayImageSpaceObjects");
            break;
          case space::kGcRetentionPolicyFullCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
                "ScanGrayZygoteSpaceObjects");
            break;
          case space::kGcRetentionPolicyAlwaysCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
                "ScanGrayAllocSpaceObjects");
            break;
        }
        ScanObjectVisitor visitor(this);
        card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor,
                         minimum_age);
        timings_.EndSplit();
      }
    }
  }
}

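// RecursiveMarkTask visits the already-marked objects in a bitmap range and scans them, pushing
// any newly discovered references; RecursiveMark() below partitions each space into such ranges
// when marking in parallel.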
class RecursiveMarkTask : public MarkStackTask<false> {
 public:
  RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
                    accounting::ContinuousSpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
      : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL),
        bitmap_(bitmap),
        begin_(begin),
        end_(end) {
  }

 protected:
  accounting::ContinuousSpaceBitmap* const bitmap_;
  const uintptr_t begin_;
  const uintptr_t end_;

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    bitmap_->VisitMarkedRange(begin_, end_, visitor);
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  TimingLogger::ScopedSplit split("RecursiveMark", &timings_);
  // RecursiveMark will build the lists of known instances of the Reference classes. See
  // DelayReferenceReferent for details.
  if (kUseRecursiveMark) {
    const bool partial = GetGcType() == kGcTypePartial;
    ScanObjectVisitor scan_visitor(this);
    auto* self = Thread::Current();
    ThreadPool* thread_pool = heap_->GetThreadPool();
    size_t thread_count = GetThreadCount(false);
    const bool parallel = kParallelRecursiveMark && thread_count > 1;
    mark_stack_->Reset();
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_space_bitmap_ = space->GetMarkBitmap();
        if (current_space_bitmap_ == nullptr) {
          continue;
        }
        if (parallel) {
          // We will use the mark stack in the future.
          // CHECK(mark_stack_->IsEmpty());
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          atomic_finger_.StoreRelaxed(AtomicInteger::MaxValue());

          // Create a few worker tasks.
          const size_t n = thread_count * 2;
          while (begin != end) {
            uintptr_t start = begin;
            uintptr_t delta = (end - begin) / n;
            delta = RoundUp(delta, KB);
            if (delta < 16 * KB) delta = end - begin;
            begin += delta;
            auto* task = new RecursiveMarkTask(thread_pool, this, current_space_bitmap_, start,
                                               begin);
            thread_pool->AddTask(self, task);
          }
          thread_pool->SetMaxActiveWorkers(thread_count - 1);
          thread_pool->StartWorkers(self);
          thread_pool->Wait(self, true, true);
          thread_pool->StopWorkers(self);
        } else {
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          current_space_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
        }
      }
    }
  }
  ProcessMarkStack(false);
}

mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) {
  if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
    return object;
  }
  return nullptr;
}

void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
  ScanGrayObjects(paused, minimum_age);
  ProcessMarkStack(paused);
}

void MarkSweep::ReMarkRoots() {
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  timings_.StartSplit("(Paused)ReMarkRoots");
  Runtime::Current()->VisitRoots(
      MarkRootCallback, this, static_cast<VisitRootFlags>(kVisitRootFlagNewRoots |
                                                          kVisitRootFlagStopLoggingNewRoots |
                                                          kVisitRootFlagClearRootLog));
  timings_.EndSplit();
  if (kVerifyRootsMarked) {
    timings_.StartSplit("(Paused)VerifyRoots");
    Runtime::Current()->VisitRoots(VerifyRootMarked, this);
    timings_.EndSplit();
  }
}

void MarkSweep::SweepSystemWeaks(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  timings_.StartSplit("SweepSystemWeaks");
  Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
  timings_.EndSplit();
}

mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return "marked".
  return obj;
}

void MarkSweep::VerifyIsLive(const Object* obj) {
  if (!heap_->GetLiveBitmap()->Test(obj)) {
    if (std::find(heap_->allocation_stack_->Begin(), heap_->allocation_stack_->End(), obj) ==
        heap_->allocation_stack_->End()) {
      // Object not found!
      heap_->DumpSpaces();
      LOG(FATAL) << "Found dead object " << obj;
    }
  }
}

void MarkSweep::VerifySystemWeaks() {
  // Verify system weaks, uses a special object visitor which returns the input object.
  Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
}

Mathieu Chartier0e4627e2012-10-23 16:13:36 -0700979class CheckpointMarkThreadRoots : public Closure {
Mathieu Chartier858f1c52012-10-17 17:45:55 -0700980 public:
Mathieu Chartier0f7bf6a2014-03-28 10:05:39 -0700981 explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep,
982 bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
983 : mark_sweep_(mark_sweep),
984 revoke_ros_alloc_thread_local_buffers_at_checkpoint_(
985 revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
986 }
Mathieu Chartier858f1c52012-10-17 17:45:55 -0700987
Mathieu Chartier4aeec172014-03-27 16:09:46 -0700988 virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
Mathieu Chartier3f966702013-09-04 16:50:05 -0700989 ATRACE_BEGIN("Marking thread roots");
Mathieu Chartier858f1c52012-10-17 17:45:55 -0700990 // Note: self is not necessarily equal to thread since thread may be suspended.
991 Thread* self = Thread::Current();
Mathieu Chartierd22d5482012-11-06 17:14:12 -0800992 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
993 << thread->GetState() << " thread " << thread << " self " << self;
Mathieu Chartierac86a7c2012-11-12 15:03:16 -0800994 thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
Mathieu Chartier3f966702013-09-04 16:50:05 -0700995 ATRACE_END();
Mathieu Chartier0f7bf6a2014-03-28 10:05:39 -0700996 if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) {
997 ATRACE_BEGIN("RevokeRosAllocThreadLocalBuffers");
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -0700998 mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
Mathieu Chartier0f7bf6a2014-03-28 10:05:39 -0700999 ATRACE_END();
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001000 }
Mathieu Chartier858f1c52012-10-17 17:45:55 -07001001 mark_sweep_->GetBarrier().Pass(self);
1002 }
1003
1004 private:
Mathieu Chartier4aeec172014-03-27 16:09:46 -07001005 MarkSweep* const mark_sweep_;
Mathieu Chartier0f7bf6a2014-03-28 10:05:39 -07001006 const bool revoke_ros_alloc_thread_local_buffers_at_checkpoint_;
Mathieu Chartier858f1c52012-10-17 17:45:55 -07001007};

void MarkSweep::MarkRootsCheckpoint(Thread* self,
                                    bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint);
  timings_.StartSplit("MarkRootsCheckpoint");
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the checkpoint be run on all threads, returning a count of the threads that
  // must run through the barrier, including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // TODO: optimize to not release locks when there are no threads to wait for.
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
  timings_.EndSplit();
}
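
// In outline, the checkpoint/barrier handshake above works as follows: RunCheckpoint() makes
// every runnable thread execute the closure at its next suspend point (running it on behalf of
// threads that are already suspended) and returns how many closure executions will eventually
// call Barrier::Pass(). Increment(self, barrier_count) then blocks until that many Pass()
// calls have happened. Condensed, the pattern is:
//
//   size_t barrier_count = thread_list->RunCheckpoint(&closure);  // closure ends in Pass(self)
//   gc_barrier_->Increment(self, barrier_count);                  // wait for all Pass() calls
//
// The mutator and heap bitmap locks are released around the wait so the checkpointed threads
// can run and suspend without deadlocking against this thread.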

void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  timings_.StartSplit("SweepArray");
  Thread* self = Thread::Current();
  mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>(
      sweep_array_free_buffer_mem_map_->BaseBegin());
  size_t chunk_free_pos = 0;
  size_t freed_bytes = 0;
  size_t freed_large_object_bytes = 0;
  size_t freed_objects = 0;
  size_t freed_large_objects = 0;
  // How many objects are left in the array, modified after each space is swept.
  Object** objects = allocations->Begin();
  size_t count = allocations->Size();
  // Change the order so that the non-moving space is swept last as an optimization.
  std::vector<space::ContinuousSpace*> sweep_spaces;
  space::ContinuousSpace* non_moving_space = nullptr;
  for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
    if (space->IsAllocSpace() && !immune_region_.ContainsSpace(space) &&
        space->GetLiveBitmap() != nullptr) {
      if (space == heap_->GetNonMovingSpace()) {
        non_moving_space = space;
      } else {
        sweep_spaces.push_back(space);
      }
    }
  }
  // We are unlikely to sweep a significant number of non-movable objects, so we do these after
  // the other alloc spaces as an optimization.
  if (non_moving_space != nullptr) {
    sweep_spaces.push_back(non_moving_space);
  }
  // Start by sweeping the continuous spaces.
  for (space::ContinuousSpace* space : sweep_spaces) {
    space::AllocSpace* alloc_space = space->AsAllocSpace();
    accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (swap_bitmaps) {
      std::swap(live_bitmap, mark_bitmap);
    }
    Object** out = objects;
    for (size_t i = 0; i < count; ++i) {
      Object* obj = objects[i];
      if (kUseThreadLocalAllocationStack && obj == nullptr) {
        continue;
      }
      if (space->HasAddress(obj)) {
        // This object is in the space, remove it from the array and add it to the sweep buffer
        // if needed.
        if (!mark_bitmap->Test(obj)) {
          if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
            timings_.StartSplit("FreeList");
            freed_objects += chunk_free_pos;
            freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
            timings_.EndSplit();
            chunk_free_pos = 0;
          }
          chunk_free_buffer[chunk_free_pos++] = obj;
        }
      } else {
        *(out++) = obj;
      }
    }
    if (chunk_free_pos > 0) {
      timings_.StartSplit("FreeList");
      freed_objects += chunk_free_pos;
      freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
      timings_.EndSplit();
      chunk_free_pos = 0;
    }
    // All of the references which the space contained are no longer in the allocation stack;
    // update the count.
    count = out - objects;
  }
  // Handle the large object space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
  accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  for (size_t i = 0; i < count; ++i) {
    Object* obj = objects[i];
    // Handle large objects.
    if (kUseThreadLocalAllocationStack && obj == nullptr) {
      continue;
    }
    if (!large_mark_objects->Test(obj)) {
      ++freed_large_objects;
      freed_large_object_bytes += large_object_space->Free(self, obj);
    }
  }
  timings_.EndSplit();

  timings_.StartSplit("RecordFree");
  VLOG(heap) << "Freed " << freed_objects << "/" << count << " objects with size "
             << PrettySize(freed_bytes);
  RecordFree(freed_objects, freed_bytes);
  RecordFreeLargeObjects(freed_large_objects, freed_large_object_bytes);
  timings_.EndSplit();

  timings_.StartSplit("ResetStack");
  allocations->Reset();
  timings_.EndSplit();

  sweep_array_free_buffer_mem_map_->MadviseDontNeedAndZero();
}
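
// The chunked-free scheme above batches unmarked objects into chunk_free_buffer and releases
// them kSweepArrayChunkFreeSize (1024) at a time, so the "FreeList" timing split and the
// allocator's internal locking are paid once per chunk rather than once per object. As an
// illustrative example, 10000 dead objects in a single space cost ten FreeList() calls: nine
// full chunks of 1024 objects plus a final partial chunk of 784.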

void MarkSweep::Sweep(bool swap_bitmaps) {
  // Ensure that nobody inserted items in the live stack after we swapped the stacks.
  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
  // knowing that new allocations won't be marked as live.
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();

  DCHECK(mark_stack_->IsEmpty());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      TimingLogger::ScopedSplit split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", &timings_);
      size_t freed_objects = 0;
      size_t freed_bytes = 0;
      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
      RecordFree(freed_objects, freed_bytes);
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
  RecordFreeLargeObjects(freed_objects, freed_bytes);
}
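
// A note on the swap_bitmaps flag used by the sweeps above: conceptually, a sweep frees every
// object present in the old live bitmap but absent from the mark bitmap. Depending on where in
// the GC cycle the sweep runs, the heap may already have swapped its live and mark bitmaps
// globally, so the flag lets callers compensate by exchanging the two local pointers before
// the walk, exactly as SweepArray() does:
//
//   if (swap_bitmaps) {
//     std::swap(live_bitmap, mark_bitmap);
//   }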

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) {
  if (kCountJavaLangRefs) {
    ++reference_count_;
  }
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, &HeapReferenceMarkedCallback,
                                                         this);
}
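
// Hedged summary of what the reference processor does with this: if the referent is non-null
// and HeapReferenceMarkedCallback reports it unmarked, the Reference is appended to the
// pending queue matching its class (soft, weak, finalizer, or phantom). Deciding whether to
// clear or enqueue the referent is deferred to the reference-processing phase after marking.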

class MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {
  }

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
      ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset));
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans an object reference. Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(Object* obj) {
  MarkObjectVisitor mark_visitor(this);
  DelayReferenceReferentVisitor ref_visitor(this);
  ScanObjectVisit(obj, mark_visitor, ref_visitor);
}
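
// In outline, ScanObjectVisit() (see mark_sweep-inl.h) walks every reference the object holds:
// instance fields found via the class's reference-offset information, elements of object
// arrays, and static fields of Class objects. mark_visitor marks each referenced object, while
// ref_visitor intercepts java.lang.ref referent fields and routes them through
// DelayReferenceReferent() instead of marking through them.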

void MarkSweep::ProcessMarkStackCallback(void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(false);
}

void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
                                     static_cast<size_t>(MarkStackTask<false>::kMaxSize));
  CHECK_GT(chunk_size, 0U);
  // Split the current mark stack up into work tasks.
  for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) {
    const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
    thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it));
    it += delta;
  }
  thread_pool->SetMaxActiveWorkers(thread_count - 1);
  thread_pool->StartWorkers(self);
  thread_pool->Wait(self, true, true);
  thread_pool->StopWorkers(self);
  mark_stack_->Reset();
  CHECK_EQ(work_chunks_created_.LoadSequentiallyConsistent(),
           work_chunks_deleted_.LoadSequentiallyConsistent())
      << " some of the work chunks were leaked";
}
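
// Worked example of the chunking arithmetic above (illustrative numbers): with 10000 entries
// on the mark stack and thread_count == 4, chunk_size = min(10000 / 4 + 1, kMaxSize) = 2501
// (assuming kMaxSize is larger), producing four MarkStackTasks of 2501, 2501, 2501 and 2497
// entries. The "+ 1" makes chunk_size strictly greater than Size() / thread_count, so at most
// thread_count tasks are created unless the kMaxSize cap kicks in. Note that self also
// processes tasks during Wait(), which is why only thread_count - 1 pool workers are enabled.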

// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack(bool paused) {
  timings_.StartSplit(paused ? "(Paused)ProcessMarkStack" : "ProcessMarkStack");
  size_t thread_count = GetThreadCount(paused);
  if (kParallelProcessMarkStack && thread_count > 1 &&
      mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
    ProcessMarkStackParallel(thread_count);
  } else {
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
          Object* mark_stack_obj = mark_stack_->PopBack();
          DCHECK(mark_stack_obj != nullptr);
          __builtin_prefetch(mark_stack_obj);
          prefetch_fifo.push_back(mark_stack_obj);
        }
        if (prefetch_fifo.empty()) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (mark_stack_->IsEmpty()) {
          break;
        }
        obj = mark_stack_->PopBack();
      }
      DCHECK(obj != nullptr);
      ScanObject(obj);
    }
  }
  timings_.EndSplit();
}
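
// The prefetch window above trades a little bookkeeping for cache warmth: up to kFifoSize
// objects are popped ahead of time and __builtin_prefetch() is issued for each, so by the time
// an object reaches the front of the FIFO and is handed to ScanObject() its header is likely
// already in cache. kFifoSize == 4 is a guess at a good prefetch distance (see the TODO
// above), balancing memory latency against the FIFO's own footprint.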

inline bool MarkSweep::IsMarked(const Object* object) const {
  if (immune_region_.ContainsObject(object)) {
    return true;
  }
  if (current_space_bitmap_->HasAddress(object)) {
    return current_space_bitmap_->Test(object);
  }
  return mark_bitmap_->Test(object);
}
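
// The lookup order above is a fast-path hierarchy: objects in the immune region (e.g. image
// and zygote spaces during a partial GC) are treated as always marked, the bitmap of the space
// currently being marked handles the common case, and only otherwise does the query fall back
// to the heap-wide mark bitmap, which also covers the discontinuous large object space.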

void MarkSweep::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  if (kCountScannedTypes) {
    VLOG(gc) << "MarkSweep scanned classes=" << class_count_.LoadRelaxed()
             << " arrays=" << array_count_.LoadRelaxed() << " other=" << other_count_.LoadRelaxed();
  }
  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_.LoadRelaxed();
  }
  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_.LoadRelaxed());
  }
  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_.LoadRelaxed()
             << " marked " << large_object_mark_.LoadRelaxed();
  }
  if (kCountJavaLangRefs) {
    VLOG(gc) << "References scanned " << reference_count_.LoadRelaxed();
  }
  if (kCountMarkedObjects) {
    VLOG(gc) << "Marked: null=" << mark_null_count_.LoadRelaxed()
             << " immune=" << mark_immune_count_.LoadRelaxed()
             << " fastpath=" << mark_fastpath_count_.LoadRelaxed()
             << " slowpath=" << mark_slowpath_count_.LoadRelaxed();
  }
  CHECK(mark_stack_->IsEmpty());  // Ensure that the mark stack is empty.
  mark_stack_->Reset();
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

void MarkSweep::RevokeAllThreadLocalBuffers() {
  if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) {
    // If concurrent, rosalloc thread-local buffers are revoked at the
    // thread checkpoint. Bump pointer space thread-local buffers must
    // not be in use.
    GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
  } else {
    timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
    GetHeap()->RevokeAllThreadLocalBuffers();
    timings_.EndSplit();
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art