/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "monitor.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::ArtField;
using ::art::mirror::Class;
using ::art::mirror::Object;
using ::art::mirror::ObjectArray;

namespace art {
namespace gc {
namespace collector {

// Performance options.
static constexpr bool kUseRecursiveMark = false;
static constexpr bool kUseMarkStackPrefetch = true;
static constexpr size_t kSweepArrayChunkFreeSize = 1024;
static constexpr bool kPreCleanCards = true;

// Parallelism options.
static constexpr bool kParallelCardScan = true;
static constexpr bool kParallelRecursiveMark = true;
// Don't attempt to parallelize mark stack processing unless the mark stack is at least n
// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc. Not
// having this can add overhead in ProcessReferences since we may end up doing many calls of
// ProcessMarkStack with very small mark stacks.
static constexpr size_t kMinimumParallelMarkStackSize = 128;
static constexpr bool kParallelProcessMarkStack = true;

// Profiling and information flags.
static constexpr bool kCountClassesMarked = false;
static constexpr bool kProfileLargeObjects = false;
static constexpr bool kMeasureOverhead = false;
static constexpr bool kCountTasks = false;
static constexpr bool kCountJavaLangRefs = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
static constexpr bool kCheckLocks = kDebugLocking;
static constexpr bool kVerifyRoots = kIsDebugBuild;

void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    CHECK(space->IsContinuousMemMapAllocSpace());
    space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
  }

  // Add the space to the immune region.
  // TODO: Use space limits instead of current end_ since the end_ can be changed by dlmalloc
  // callbacks.
  if (immune_begin_ == NULL) {
    DCHECK(immune_end_ == NULL);
    SetImmuneRange(reinterpret_cast<Object*>(space->Begin()),
                   reinterpret_cast<Object*>(space->End()));
  } else {
    const space::ContinuousSpace* prev_space = nullptr;
    // Find out if the previous space is immune.
    for (const space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
      if (cur_space == space) {
        break;
      }
      prev_space = cur_space;
    }
    // If previous space was immune, then extend the immune region. Relies on continuous spaces
    // being sorted by Heap::AddContinuousSpace.
    if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
    }
  }
}


bool MarkSweep::IsImmuneSpace(const space::ContinuousSpace* space) const {
  return
      immune_begin_ <= reinterpret_cast<Object*>(space->Begin()) &&
      immune_end_ >= reinterpret_cast<Object*>(space->End());
}

void MarkSweep::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      ImmuneSpace(space);
    }
  }
  timings_.EndSplit();
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix +
                       (is_concurrent ? "concurrent mark sweep" : "mark sweep")),
      current_mark_bitmap_(NULL),
      mark_stack_(NULL),
      immune_begin_(NULL),
      immune_end_(NULL),
      live_stack_freeze_size_(0),
      gc_barrier_(new Barrier(0)),
      large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
      is_concurrent_(is_concurrent) {
}

void MarkSweep::InitializePhase() {
  timings_.Reset();
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  SetImmuneRange(nullptr, nullptr);
  class_count_ = 0;
  array_count_ = 0;
  other_count_ = 0;
  large_object_test_ = 0;
  large_object_mark_ = 0;
  classes_marked_ = 0;
  overhead_time_ = 0;
  work_chunks_created_ = 0;
  work_chunks_deleted_ = 0;
  reference_count_ = 0;

  FindDefaultMarkBitmap();

  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
}

void MarkSweep::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &IsMarkedCallback,
                               &MarkObjectCallback, &ProcessMarkStackPausedCallback, this);
}

bool MarkSweep::HandleDirtyObjectsPhase() {
  TimingLogger::ScopedSplit split("(Paused)HandleDirtyObjectsPhase", &timings_);
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Re-mark root set.
    ReMarkRoots();

    // Scan dirty objects; this is only required if we are not doing concurrent GC.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }

  ProcessReferences(self);

  // Only need to do this if we have the card mark verification on, and only during concurrent GC.
  if (GetHeap()->verify_missing_card_marks_ || GetHeap()->verify_pre_gc_heap_ ||
      GetHeap()->verify_post_gc_heap_) {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // This second sweep makes sure that we don't have any objects in the live stack which point to
    // freed objects. These cause problems since their references may be previously freed objects.
    SweepArray(GetHeap()->allocation_stack_.get(), false);
    // Since SweepArray() above resets the (active) allocation stack, we need to revoke the
    // thread-local allocation stacks that point into it.
    RevokeAllThreadLocalAllocationStacks(self);
  }

  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  // Ensure that nobody inserted items in the live stack after we swapped the stacks.
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());

  // Disallow new system weaks to prevent a race which occurs when someone adds a new system
  // weak before we sweep them. Since this new system weak may not be marked, the GC may
  // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
  // reference to a string that is about to be swept.
  Runtime::Current()->DisallowNewSystemWeaks();
  return true;
}

bool MarkSweep::IsConcurrent() const {
  return is_concurrent_;
}

void MarkSweep::PreCleanCards() {
  // Don't do this for non-concurrent GCs since they don't have any dirty cards.
  if (kPreCleanCards && IsConcurrent()) {
    Thread* self = Thread::Current();
    CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
    // Process dirty cards and add dirty cards to mod union tables, also ages cards.
    heap_->ProcessCards(timings_);
    // The checkpoint root marking is required to avoid a race condition which occurs if the
    // following happens during a reference write:
    // 1. mutator dirties the card (write barrier)
    // 2. GC ages the card (the above ProcessCards call)
    // 3. GC scans the object (the RecursiveMarkDirtyObjects call below)
    // 4. mutator writes the value (corresponding to the write barrier in 1.)
    // This causes the GC to age the card but not necessarily mark the reference which the mutator
    // wrote into the object stored in the card.
    // Having the checkpoint fixes this issue since it ensures that the card mark and the
    // reference write are visible to the GC before the card is scanned (this is due to locks being
    // acquired / released in the checkpoint code).
    // The other roots are also marked to help reduce the pause.
    MarkThreadRoots(self);
    // TODO: Only mark the dirty roots.
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
    // Process the newly aged cards.
    RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
    // TODO: Empty allocation stack to reduce the number of objects we need to test / mark as live
    // in the next GC.
  }
}

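// Flushing the thread-local allocation stacks back into the shared stack requires the mutator
// lock to be held exclusively, so no mutator can be pushing onto its local stack concurrently.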
void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
  if (kUseThreadLocalAllocationStack) {
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    heap_->RevokeAllThreadLocalAllocationStacks(self);
  }
}

void MarkSweep::MarkingPhase() {
  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  Thread* self = Thread::Current();

  BindBitmaps();
  FindDefaultMarkBitmap();

  // Process dirty cards and add dirty cards to mod union tables.
  heap_->ProcessCards(timings_);

  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  heap_->SwapStacks(self);

  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  MarkRoots(self);
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
  UpdateAndMarkModUnion();
  MarkReachableObjects();
  // Pre-clean dirtied cards to reduce pauses.
  PreCleanCards();
}

void MarkSweep::UpdateAndMarkModUnion() {
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (IsImmuneSpace(space)) {
      const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
          "UpdateAndMarkImageModUnionTable";
      TimingLogger::ScopedSplit split(name, &timings_);
      accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
      CHECK(mod_union_table != nullptr);
      mod_union_table->UpdateAndMarkReferences(MarkObjectCallback, this);
    }
  }
}

void MarkSweep::MarkThreadRoots(Thread* self) {
  MarkRootsCheckpoint(self);
}

void MarkSweep::MarkReachableObjects() {
  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
  // knowing that new allocations won't be marked as live.
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  Thread* self = Thread::Current();

  if (!IsConcurrent()) {
    ProcessReferences(self);
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }

  if (IsConcurrent()) {
    Runtime::Current()->AllowNewSystemWeaks();

    TimingLogger::ScopedSplit split("UnMarkAllocStack", &timings_);
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
    if (!kPreCleanCards) {
      // The allocation stack contains things allocated since the start of the GC. These may have
      // been marked during this GC meaning they won't be eligible for reclaiming in the next
      // sticky GC. Unmark these objects so that they are eligible for reclaiming in the next
      // sticky GC.
      // There is a race here which is safely handled. Another thread such as the hprof could
      // have flushed the alloc stack after we resumed the threads. This is safe however, since
      // resetting the allocation stack zeros it out with madvise. This means that we will either
      // read NULLs or attempt to unmark a newly allocated object which will not be marked in the
      // first place.
      // We can't do this if we pre-clean cards since we will unmark objects which are no longer on
      // a dirty card since we aged cards during the pre-cleaning process.
      mirror::Object** end = allocation_stack->End();
      for (mirror::Object** it = allocation_stack->Begin(); it != end; ++it) {
        const Object* obj = *it;
        if (obj != nullptr) {
          UnMarkObjectNonNull(obj);
        }
      }
    }
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Reclaim unmarked objects.
    Sweep(false);

    // Swap the live and mark bitmaps for each space which we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();

    // Unbind the live and mark bitmaps.
    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
    GetHeap()->UnBindBitmaps();
  }
}

void MarkSweep::SetImmuneRange(Object* begin, Object* end) {
  immune_begin_ = begin;
  immune_end_ = end;
}

void MarkSweep::FindDefaultMarkBitmap() {
  TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_mark_bitmap_ = bitmap;
      CHECK(current_mark_bitmap_ != NULL);
      return;
    }
  }
  GetHeap()->DumpSpaces();
  LOG(FATAL) << "Could not find a default mark bitmap";
}

void MarkSweep::ExpandMarkStack() {
  ResizeMarkStack(mark_stack_->Capacity() * 2);
}

void MarkSweep::ResizeMarkStack(size_t new_size) {
  // Rare case, no need to have Thread::Current be a parameter.
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj) {
  DCHECK(obj != NULL);
  if (MarkObjectParallel(obj)) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(const_cast<Object*>(obj));
  }
}

mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* obj, void* arg) {
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObject(obj);
  return obj;
}

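// Clears the mark bit for obj. Used by ReclaimPhase to un-mark objects that were allocated during
// this GC so they remain candidates for reclamation by the next sticky GC.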
inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) {
  DCHECK(!IsImmune(obj));

  if (kUseBrooksPointer) {
    // Verify all the objects have the correct Brooks pointer installed.
    obj->AssertSelfBrooksPointer();
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, false);
      return;
    }
  }

  DCHECK(object_bitmap->HasAddress(obj));
  object_bitmap->Clear(obj);
}

inline void MarkSweep::MarkObjectNonNull(const Object* obj) {
  DCHECK(obj != NULL);

  if (kUseBrooksPointer) {
    // Verify all the objects have the correct Brooks pointer installed.
    obj->AssertSelfBrooksPointer();
  }

  if (IsImmune(obj)) {
    DCHECK(IsMarked(obj));
    return;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, true);
      return;
    }
  }

  // This object was not previously marked.
  if (!object_bitmap->Test(obj)) {
    object_bitmap->Set(obj);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      // Lock is not needed but is here anyways to please annotalysis.
      MutexLock mu(Thread::Current(), mark_stack_lock_);
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(const_cast<Object*>(obj));
  }
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool MarkSweep::MarkLargeObject(const Object* obj, bool set) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects();
  if (kProfileLargeObjects) {
    ++large_object_test_;
  }
  if (UNLIKELY(!large_objects->Test(obj))) {
    if (!large_object_space->Contains(obj)) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      VerifyRoots();
      LOG(FATAL) << "Can't mark bad root";
    }
    if (kProfileLargeObjects) {
      ++large_object_mark_;
    }
    if (set) {
      large_objects->Set(obj);
    } else {
      large_objects->Clear(obj);
    }
    return true;
  }
  return false;
}

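// Parallel variant of marking: uses an atomic test-and-set on the space bitmap so that multiple
// worker threads may race to mark the same object and exactly one of them wins (returns true).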
inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != NULL);

  if (kUseBrooksPointer) {
    // Verify all the objects have the correct Brooks pointer installed.
    obj->AssertSelfBrooksPointer();
  }

  if (IsImmune(obj)) {
    DCHECK(IsMarked(obj));
    return false;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (new_bitmap != NULL) {
      object_bitmap = new_bitmap;
    } else {
      // TODO: Remove the Thread::Current here?
      // TODO: Convert this to some kind of atomic marking?
      MutexLock mu(Thread::Current(), large_object_lock_);
      return MarkLargeObject(obj, true);
    }
  }

  // Return true if the object was not previously marked.
  return !object_bitmap->AtomicTestAndSet(obj);
}

// Used to mark objects when recursing. Recursion is done by moving
// the finger across the bitmaps in address order and marking child
// objects. Any newly-marked objects whose addresses are lower than
// the finger won't be visited by the bitmap scan, so those objects
// need to be added to the mark stack.
inline void MarkSweep::MarkObject(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj);
  }
}

void MarkSweep::MarkRootParallelCallback(mirror::Object** root, void* arg, uint32_t /*thread_id*/,
                                         RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(*root);
}

void MarkSweep::VerifyRootMarked(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  CHECK(reinterpret_cast<MarkSweep*>(arg)->IsMarked(*root));
}

void MarkSweep::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(*root);
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) {
  // See if the root is on any space bitmap.
  if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == NULL) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root;
      if (visitor != NULL) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

void MarkSweep::MarkRoots(Thread* self) {
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    timings_.StartSplit("MarkRoots");
    Runtime::Current()->VisitRoots(MarkRootCallback, this);
    timings_.EndSplit();
    RevokeAllThreadLocalAllocationStacks(self);
  } else {
    MarkThreadRoots(self);
    // At this point the live stack should no longer have any mutators which push into it.
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
  }
}

void MarkSweep::MarkNonThreadRoots() {
  timings_.StartSplit("MarkNonThreadRoots");
  Runtime::Current()->VisitNonThreadRoots(MarkRootCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
  timings_.StartSplit("MarkConcurrentRoots");
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, flags);
  timings_.EndSplit();
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
      : mark_sweep_(mark_sweep) {}

  // TODO: Fix me when annotalysis works with visitors.
  void operator()(Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

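// A work unit for parallel marking: each task carries its own bounded mark stack of up to kMaxSize
// object pointers. When a push overflows, half of the local stack is handed to the thread pool as
// a new task, which naturally balances marking work across the GC worker threads.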
template <bool kUseFinger = false>
class MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
                const Object** mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != NULL);
      // TODO: Check performance?
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  static const size_t kMaxSize = 1 * KB;

 protected:
  class ScanObjectParallelVisitor {
   public:
    explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
        : chunk_task_(chunk_task) {}

    void operator()(Object* obj) const {
      MarkSweep* mark_sweep = chunk_task_->mark_sweep_;
      mark_sweep->ScanObjectVisit(obj,
          [mark_sweep, this](Object* /* obj */, Object* ref, const MemberOffset& /* offset */,
              bool /* is_static */) ALWAYS_INLINE_LAMBDA {
        if (ref != nullptr && mark_sweep->MarkObjectParallel(ref)) {
          if (kUseFinger) {
            android_memory_barrier();
            if (reinterpret_cast<uintptr_t>(ref) >=
                static_cast<uintptr_t>(mark_sweep->atomic_finger_)) {
              return;
            }
          }
          chunk_task_->MarkStackPush(ref);
        }
      });
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread local mark stack for this task.
  const Object* mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

  void MarkStackPush(const Object* obj) ALWAYS_INLINE {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK(mark_stack_pos_ < kMaxSize);
    mark_stack_[mark_stack_pos_++] = obj;
  }

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects on the local mark stack.
  virtual void Run(Thread* self) {
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<const Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      const Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          const Object* obj = mark_stack_[--mark_stack_pos_];
          DCHECK(obj != nullptr);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_];
      }
      DCHECK(obj != nullptr);
      visitor(const_cast<mirror::Object*>(obj));
    }
  }
};

class CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, accounting::SpaceBitmap* bitmap,
               byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
               const Object** mark_stack_obj)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age) {
  }

 protected:
  accounting::SpaceBitmap* const bitmap_;
  byte* const begin_;
  byte* const end_;
  const byte minimum_age_;

  virtual void Finalize() {
    delete this;
  }

  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    size_t cards_scanned = card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_);
    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
               << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

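// Returns the number of threads to use for GC work, including the calling thread, or 0 to force
// the serial paths when there is no thread pool or the process does not care about pause times.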
size_t MarkSweep::GetThreadCount(bool paused) const {
  if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
    return 0;
  }
  if (paused) {
    return heap_->GetParallelGCThreadCount() + 1;
  } else {
    return heap_->GetConcGCThreadCount() + 1;
  }
}

void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  size_t thread_count = GetThreadCount(paused);
  // The parallel version with only one thread is faster for card scanning. TODO: fix.
  if (kParallelCardScan && thread_count > 0) {
    Thread* self = Thread::Current();
    // Can't have a different split for each space since multiple spaces can have their cards being
    // scanned at the same time.
    timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects");
    // Try to take some of the mark stack since we can pass this off to the worker tasks.
    const Object** mark_stack_begin = const_cast<const Object**>(mark_stack_->Begin());
    const Object** mark_stack_end = const_cast<const Object**>(mark_stack_->End());
    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
    // Estimated number of work tasks we will create.
    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
    DCHECK_NE(mark_stack_tasks, 0U);
    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                             mark_stack_size / mark_stack_tasks + 1);
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() == nullptr) {
        continue;
      }
      byte* card_begin = space->Begin();
      byte* card_end = space->End();
      // Align up the end address. For example, the image space's end
      // may not be card-size-aligned.
      card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin));
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end));
      // Calculate how many bytes of heap we will scan.
      const size_t address_range = card_end - card_begin;
      // Calculate how much address range each task gets.
      const size_t card_delta = RoundUp(address_range / thread_count + 1,
                                        accounting::CardTable::kCardSize);
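      // For example, a 64 MB space with thread_count == 4 yields card-aligned chunks of roughly
      // 16 MB per task.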
      // Create the worker tasks for this space.
      while (card_begin != card_end) {
        // Add a range of cards.
        size_t addr_remaining = card_end - card_begin;
        size_t card_increment = std::min(card_delta, addr_remaining);
        // Take from the back of the mark stack.
        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
        mark_stack_end -= mark_stack_increment;
        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
        DCHECK_EQ(mark_stack_end, const_cast<const art::mirror::Object**>(mark_stack_->End()));
        // Add the new task to the thread pool.
        auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
                                      card_begin + card_increment, minimum_age,
                                      mark_stack_increment, mark_stack_end);
        thread_pool->AddTask(self, task);
        card_begin += card_increment;
      }
    }

    // Note: the card scan below may dirty new cards (and scan them)
    // as a side effect when a Reference object is encountered and
    // queued during the marking. See b/11465268.
    thread_pool->SetMaxActiveWorkers(thread_count - 1);
    thread_pool->StartWorkers(self);
    thread_pool->Wait(self, true, true);
    thread_pool->StopWorkers(self);
    timings_.EndSplit();
  } else {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() != nullptr) {
        // Image spaces are handled properly since live == marked for them.
        switch (space->GetGcRetentionPolicy()) {
          case space::kGcRetentionPolicyNeverCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
                "ScanGrayImageSpaceObjects");
            break;
          case space::kGcRetentionPolicyFullCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
                "ScanGrayZygoteSpaceObjects");
            break;
          case space::kGcRetentionPolicyAlwaysCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
                "ScanGrayAllocSpaceObjects");
            break;
        }
        ScanObjectVisitor visitor(this);
        card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor,
                         minimum_age);
        timings_.EndSplit();
      }
    }
  }
}

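// Scans the marked objects in the bitmap range [begin_, end_), then drains its local mark stack;
// used by the parallel variant of RecursiveMark to split a space into independent chunks.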
class RecursiveMarkTask : public MarkStackTask<false> {
 public:
  RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
                    accounting::SpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
      : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL),
        bitmap_(bitmap),
        begin_(begin),
        end_(end) {
  }

 protected:
  accounting::SpaceBitmap* const bitmap_;
  const uintptr_t begin_;
  const uintptr_t end_;

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects in the given bitmap range.
  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    bitmap_->VisitMarkedRange(begin_, end_, visitor);
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  TimingLogger::ScopedSplit split("RecursiveMark", &timings_);
  // RecursiveMark will build the lists of known instances of the Reference classes. See
  // DelayReferenceReferent for details.
  if (kUseRecursiveMark) {
    const bool partial = GetGcType() == kGcTypePartial;
    ScanObjectVisitor scan_visitor(this);
    auto* self = Thread::Current();
    ThreadPool* thread_pool = heap_->GetThreadPool();
    size_t thread_count = GetThreadCount(false);
    const bool parallel = kParallelRecursiveMark && thread_count > 1;
    mark_stack_->Reset();
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_mark_bitmap_ = space->GetMarkBitmap();
        if (current_mark_bitmap_ == nullptr) {
          continue;
        }
        if (parallel) {
          // We will use the mark stack in the future.
          // CHECK(mark_stack_->IsEmpty());
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          atomic_finger_ = static_cast<int32_t>(0xFFFFFFFF);

          // Create a few worker tasks.
          const size_t n = thread_count * 2;
          while (begin != end) {
            uintptr_t start = begin;
            uintptr_t delta = (end - begin) / n;
            delta = RoundUp(delta, KB);
            if (delta < 16 * KB) delta = end - begin;
            begin += delta;
            auto* task = new RecursiveMarkTask(thread_pool, this, current_mark_bitmap_, start,
                                               begin);
            thread_pool->AddTask(self, task);
          }
          thread_pool->SetMaxActiveWorkers(thread_count - 1);
          thread_pool->StartWorkers(self);
          thread_pool->Wait(self, true, true);
          thread_pool->StopWorkers(self);
        } else {
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
        }
      }
    }
  }
  ProcessMarkStack(false);
}

mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) {
  if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
    return object;
  }
  return nullptr;
}

void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
  ScanGrayObjects(paused, minimum_age);
  ProcessMarkStack(paused);
}

void MarkSweep::ReMarkRoots() {
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  timings_.StartSplit("(Paused)ReMarkRoots");
  Runtime::Current()->VisitRoots(
      MarkRootCallback, this, static_cast<VisitRootFlags>(kVisitRootFlagNewRoots |
                                                          kVisitRootFlagStopLoggingNewRoots |
                                                          kVisitRootFlagClearRootLog));
  timings_.EndSplit();
  if (kVerifyRoots) {
    timings_.StartSplit("(Paused)VerifyRoots");
    Runtime::Current()->VisitRoots(VerifyRootMarked, this);
    timings_.EndSplit();
  }
}

void MarkSweep::SweepSystemWeaks() {
  Runtime* runtime = Runtime::Current();
  timings_.StartSplit("SweepSystemWeaks");
  runtime->SweepSystemWeaks(IsMarkedCallback, this);
  timings_.EndSplit();
}

mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return "marked".
  return obj;
}

1034void MarkSweep::VerifyIsLive(const Object* obj) {
1035 Heap* heap = GetHeap();
1036 if (!heap->GetLiveBitmap()->Test(obj)) {
Ian Rogers1d54e732013-05-02 21:10:01 -07001037 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001038 if (!large_object_space->GetLiveObjects()->Test(obj)) {
1039 if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
1040 heap->allocation_stack_->End()) {
1041 // Object not found!
1042 heap->DumpSpaces();
1043 LOG(FATAL) << "Found dead object " << obj;
1044 }
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001045 }
1046 }
1047}
1048
1049void MarkSweep::VerifySystemWeaks() {
Mathieu Chartier6aa3df92013-09-17 15:17:28 -07001050 // Verify system weaks, uses a special object visitor which returns the input object.
1051 Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001052}
1053
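// Checkpoint closure run on every mutator thread: each thread visits its own stack and
// thread-local roots (and, if enabled, revokes its thread-local allocation stack), then
// passes the GC barrier so the collector knows it has finished.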
class CheckpointMarkThreadRoots : public Closure {
 public:
  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
    ATRACE_BEGIN("Marking thread roots");
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    ATRACE_END();
    if (kUseThreadLocalAllocationStack) {
      thread->RevokeThreadLocalAllocationStack();
    }
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* const mark_sweep_;
};

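// Marks thread roots via a checkpoint instead of suspending all threads: each running thread
// executes CheckpointMarkThreadRoots at its next suspend point while the collector blocks on
// gc_barrier_ until barrier_count threads have passed it.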
void MarkSweep::MarkRootsCheckpoint(Thread* self) {
  CheckpointMarkThreadRoots check_point(this);
  timings_.StartSplit("MarkRootsCheckpoint");
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the checkpoint be run on all threads, returning a count of the threads that
  // must run through the barrier, including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // TODO: optimize to not release locks when there are no threads to wait for.
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun);
  CHECK_EQ(old_state, kWaitingPerformingGc);
  gc_barrier_->Increment(self, barrier_count);
  self->SetState(kWaitingPerformingGc);
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
  timings_.EndSplit();
}

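// Sweeps the allocation stack directly instead of walking bitmaps: anything on the stack that
// is not marked gets freed. Dead objects are batched into chunk_free_buffer so each FreeList()
// call amortizes the allocator's locking. A sketch of the batching, assuming
// kSweepArrayChunkFreeSize is 1024 (its actual value is set near the top of this file): 4000
// dead objects in one space produce three full 1024-object FreeList() calls from the main loop
// plus a final 928-object flush after the loop.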
void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  timings_.StartSplit("SweepArray");
  Thread* self = Thread::Current();
  mirror::Object* chunk_free_buffer[kSweepArrayChunkFreeSize];
  size_t chunk_free_pos = 0;
  size_t freed_bytes = 0;
  size_t freed_large_object_bytes = 0;
  size_t freed_objects = 0;
  size_t freed_large_objects = 0;
  // How many objects are left in the array, modified after each space is swept.
  Object** objects = const_cast<Object**>(allocations->Begin());
  size_t count = allocations->Size();
  // Change the order to ensure that the non-moving space is swept last as an optimization.
  std::vector<space::ContinuousSpace*> sweep_spaces;
  space::ContinuousSpace* non_moving_space = nullptr;
  for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
    if (space->IsAllocSpace() && !IsImmuneSpace(space) && space->GetLiveBitmap() != nullptr) {
      if (space == heap_->GetNonMovingSpace()) {
        non_moving_space = space;
      } else {
        sweep_spaces.push_back(space);
      }
    }
  }
  // Unlikely to sweep a significant amount of non-movable objects, so we sweep these after
  // the other alloc spaces as an optimization.
  if (non_moving_space != nullptr) {
    sweep_spaces.push_back(non_moving_space);
  }
  // Start by sweeping the continuous spaces.
  for (space::ContinuousSpace* space : sweep_spaces) {
    space::AllocSpace* alloc_space = space->AsAllocSpace();
    accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (swap_bitmaps) {
      std::swap(live_bitmap, mark_bitmap);
    }
    Object** out = objects;
    for (size_t i = 0; i < count; ++i) {
      Object* obj = objects[i];
      if (kUseThreadLocalAllocationStack && obj == nullptr) {
        continue;
      }
      if (space->HasAddress(obj)) {
        // This object is in the space, remove it from the array and add it to the sweep buffer
        // if needed.
        if (!mark_bitmap->Test(obj)) {
          if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
            timings_.StartSplit("FreeList");
            freed_objects += chunk_free_pos;
            freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
            timings_.EndSplit();
            chunk_free_pos = 0;
          }
          chunk_free_buffer[chunk_free_pos++] = obj;
        }
      } else {
        *(out++) = obj;
      }
    }
    if (chunk_free_pos > 0) {
      timings_.StartSplit("FreeList");
      freed_objects += chunk_free_pos;
      freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
      timings_.EndSplit();
      chunk_free_pos = 0;
    }
    // All of the references which the space contained are no longer in the allocation stack;
    // update the count.
    count = out - objects;
  }
  // Handle the large object space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects();
  accounting::ObjectSet* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  for (size_t i = 0; i < count; ++i) {
    Object* obj = objects[i];
    // Handle large objects.
    if (kUseThreadLocalAllocationStack && obj == nullptr) {
      continue;
    }
    if (!large_mark_objects->Test(obj)) {
      ++freed_large_objects;
      freed_large_object_bytes += large_object_space->Free(self, obj);
    }
  }
  timings_.EndSplit();

  timings_.StartSplit("RecordFree");
  VLOG(heap) << "Freed " << freed_objects << "/" << count
             << " objects with size " << PrettySize(freed_bytes);
  heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes + freed_large_object_bytes);
  freed_objects_.FetchAndAdd(freed_objects);
  freed_large_objects_.FetchAndAdd(freed_large_objects);
  freed_bytes_.FetchAndAdd(freed_bytes);
  freed_large_object_bytes_.FetchAndAdd(freed_large_object_bytes);
  timings_.EndSplit();

  timings_.StartSplit("ResetStack");
  allocations->Reset();
  timings_.EndSplit();
}

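// Sweeps every continuous alloc space by diffing its live and mark bitmaps, then sweeps the
// large object space. With swap_bitmaps the roles of the two bitmaps are reversed, which
// callers use when the bitmaps have already been swapped at the end of marking.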
void MarkSweep::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  TimingLogger::ScopedSplit sweep_split("Sweep", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      TimingLogger::ScopedSplit split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", &timings_);
      size_t freed_objects = 0;
      size_t freed_bytes = 0;
      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
      heap_->RecordFree(freed_objects, freed_bytes);
      freed_objects_.FetchAndAdd(freed_objects);
      freed_bytes_.FetchAndAdd(freed_bytes);
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  GetHeap()->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
  freed_large_objects_.FetchAndAdd(freed_objects);
  freed_large_object_bytes_.FetchAndAdd(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference. If the
// referent has not yet been marked, put it on the appropriate list in
// the heap for later processing.
void MarkSweep::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
  DCHECK(klass != nullptr);
  DCHECK(klass->IsReferenceClass());
  DCHECK(obj != nullptr);
  heap_->DelayReferenceReferent(klass, obj, IsMarkedCallback, this);
}

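// Visitor functor handed to ScanObjectVisit: invoked once per reference field of the object
// being scanned, and simply marks the referenced object (pushing it onto the mark stack if
// it was not already marked).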
class MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {}

  // TODO: Fix this when annotalysis works with visitors.
  void operator()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */,
                  bool /* is_static */) const ALWAYS_INLINE
      NO_THREAD_SAFETY_ANALYSIS {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(ref);
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans an object reference. Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(Object* obj) {
  MarkObjectVisitor visitor(this);
  ScanObjectVisit(obj, visitor);
}

void MarkSweep::ProcessMarkStackPausedCallback(void* arg) {
  DCHECK(arg != nullptr);
  reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(true);
}

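// Drains the mark stack with worker threads by slicing it into MarkStackTask chunks. A sketch
// of the chunking arithmetic, assuming a 4096-entry mark stack and thread_count == 4:
// chunk_size = min(4096 / 4 + 1, kMaxSize) = 1025 (when kMaxSize allows), yielding tasks of
// 1025, 1025, 1025 and 1021 entries. Only thread_count - 1 pool workers are activated because
// the calling thread also executes tasks while it waits (Wait() is passed do_work == true).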
void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
                                     static_cast<size_t>(MarkStackTask<false>::kMaxSize));
  CHECK_GT(chunk_size, 0U);
  // Split the current mark stack up into work tasks.
  for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) {
    const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
    thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta,
                                                        const_cast<const mirror::Object**>(it)));
    it += delta;
  }
  thread_pool->SetMaxActiveWorkers(thread_count - 1);
  thread_pool->StartWorkers(self);
  thread_pool->Wait(self, true, true);
  thread_pool->StopWorkers(self);
  mark_stack_->Reset();
  CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
}

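// When kUseMarkStackPrefetch is enabled, the serial path below pops objects into a small
// bounded FIFO and issues __builtin_prefetch a few entries ahead of use, so an object's
// memory is likely to be in cache by the time ScanObject dereferences it.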
// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack(bool paused) {
  timings_.StartSplit(paused ? "(Paused)ProcessMarkStack" : "ProcessMarkStack");
  size_t thread_count = GetThreadCount(paused);
  if (kParallelProcessMarkStack && thread_count > 1 &&
      mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
    ProcessMarkStackParallel(thread_count);
  } else {
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
          Object* mark_stack_obj = mark_stack_->PopBack();
          DCHECK(mark_stack_obj != nullptr);
          __builtin_prefetch(mark_stack_obj);
          prefetch_fifo.push_back(mark_stack_obj);
        }
        if (prefetch_fifo.empty()) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (mark_stack_->IsEmpty()) {
          break;
        }
        obj = mark_stack_->PopBack();
      }
      DCHECK(obj != nullptr);
      ScanObject(obj);
    }
  }
  timings_.EndSplit();
}

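// An object is considered marked if it lives in an immune space, if its bit is set in the
// current mark bitmap covering its address, or, failing that, if the heap-wide mark bitmap
// says so.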
inline bool MarkSweep::IsMarked(const Object* object) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  if (IsImmune(object)) {
    return true;
  }
  DCHECK(current_mark_bitmap_ != nullptr);
  if (current_mark_bitmap_->HasAddress(object)) {
    return current_mark_bitmap_->Test(object);
  }
  return heap_->GetMarkBitmap()->Test(object);
}

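// Post-GC bookkeeping: verification, a heap-trim request, cumulative statistics, and optional
// debug counters, followed by clearing the mark bitmaps of every space except those with a
// never-collect retention policy (image spaces, for instance), which are never swept.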
void MarkSweep::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  // Can't enqueue references if we hold the mutator lock.
  Heap* heap = GetHeap();
  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);

  timings_.NewSplit("RequestHeapTrim");
  heap->RequestHeapTrim();

  // Update the cumulative statistics.
  total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
  total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  if (kCountScannedTypes) {
    VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
             << " other=" << other_count_;
  }

  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
  }

  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
  }

  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
  }

  if (kCountClassesMarked) {
    VLOG(gc) << "Classes marked " << classes_marked_;
  }

  if (kCountJavaLangRefs) {
    VLOG(gc) << "References scanned " << reference_count_;
  }

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      bitmap->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();
}

}  // namespace collector
}  // namespace gc
}  // namespace art