/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <atomic>
#include <climits>
#include <functional>
#include <numeric>
#include <vector>

#define ATRACE_TAG ATRACE_TAG_DALVIK
#include "cutils/trace.h"

#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/time_utils.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/reference_processor.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "mark_sweep-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"

namespace art {
namespace gc {
namespace collector {

// Performance options.
static constexpr bool kUseRecursiveMark = false;
static constexpr bool kUseMarkStackPrefetch = true;
static constexpr size_t kSweepArrayChunkFreeSize = 1024;
static constexpr bool kPreCleanCards = true;

// Parallelism options.
static constexpr bool kParallelCardScan = true;
static constexpr bool kParallelRecursiveMark = true;
// Don't attempt to parallelize mark stack processing unless the mark stack is at least
// kMinimumParallelMarkStackSize elements. This is temporary until we reduce the overhead caused
// by allocating tasks, etc. Not having this can add overhead in ProcessReferences since we may
// end up doing many calls of ProcessMarkStack with very small mark stacks.
static constexpr size_t kMinimumParallelMarkStackSize = 128;
static constexpr bool kParallelProcessMarkStack = true;

// Profiling and information flags.
static constexpr bool kProfileLargeObjects = false;
static constexpr bool kMeasureOverhead = false;
static constexpr bool kCountTasks = false;
static constexpr bool kCountMarkedObjects = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
static constexpr bool kCheckLocks = kDebugLocking;
static constexpr bool kVerifyRootsMarked = kIsDebugBuild;

// If true, revoke the rosalloc thread-local buffers at the checkpoint, as opposed to during the
// pause.
static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true;

void MarkSweep::BindBitmaps() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
    }
  }
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix +
                       (is_concurrent ? "concurrent mark sweep" : "mark sweep")),
      current_space_bitmap_(nullptr), mark_bitmap_(nullptr), mark_stack_(nullptr),
      gc_barrier_(new Barrier(0)),
      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
      is_concurrent_(is_concurrent), live_stack_freeze_size_(0) {
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous(
      "mark sweep sweep array free buffer", nullptr,
      RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
      PROT_READ | PROT_WRITE, false, false, &error_msg);
  CHECK(mem_map != nullptr) << "Couldn't allocate sweep array free buffer: " << error_msg;
  sweep_array_free_buffer_mem_map_.reset(mem_map);
}
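
// Sizing note (illustrative, not from the original file): with kSweepArrayChunkFreeSize == 1024
// and 8-byte mirror::Object* on an LP64 build, the buffer above is
// RoundUp(1024 * 8, kPageSize) = 8 KiB, i.e. exactly two 4 KiB pages; a 32-bit build fits in a
// single page.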

void MarkSweep::InitializePhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  mark_stack_ = heap_->GetMarkStack();
  DCHECK(mark_stack_ != nullptr);
  immune_region_.Reset();
  no_reference_class_count_.StoreRelaxed(0);
  normal_count_.StoreRelaxed(0);
  class_count_.StoreRelaxed(0);
  object_array_count_.StoreRelaxed(0);
  other_count_.StoreRelaxed(0);
  reference_count_.StoreRelaxed(0);
  large_object_test_.StoreRelaxed(0);
  large_object_mark_.StoreRelaxed(0);
  overhead_time_.StoreRelaxed(0);
  work_chunks_created_.StoreRelaxed(0);
  work_chunks_deleted_.StoreRelaxed(0);
  mark_null_count_.StoreRelaxed(0);
  mark_immune_count_.StoreRelaxed(0);
  mark_fastpath_count_.StoreRelaxed(0);
  mark_slowpath_count_.StoreRelaxed(0);
  {
    // TODO: I don't think we should need the heap bitmap lock to get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }
  if (!GetCurrentIteration()->GetClearSoftReferences()) {
    // Always clear soft references for non-sticky collections.
    GetCurrentIteration()->SetClearSoftReferences(GetGcType() != collector::kGcTypeSticky);
  }
}
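
// Phase driver. Summary of the flow below: for a concurrent collection, marking runs with
// mutators active (shared mutator lock) and a brief ScopedPause then covers PausePhase and the
// thread-local buffer revoke; a non-concurrent collection performs all of its marking inside the
// pause. In both cases ReclaimPhase runs concurrently afterwards, followed by FinishPhase.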

void MarkSweep::RunPhases() {
  Thread* self = Thread::Current();
  InitializePhase();
  Locks::mutator_lock_->AssertNotHeld(self);
  if (IsConcurrent()) {
    GetHeap()->PreGcVerification(this);
    {
      ReaderMutexLock mu(self, *Locks::mutator_lock_);
      MarkingPhase();
    }
    ScopedPause pause(this);
    GetHeap()->PrePauseRosAllocVerification(this);
    PausePhase();
    RevokeAllThreadLocalBuffers();
  } else {
    ScopedPause pause(this);
    GetHeap()->PreGcVerificationPaused(this);
    MarkingPhase();
    GetHeap()->PrePauseRosAllocVerification(this);
    PausePhase();
    RevokeAllThreadLocalBuffers();
  }
  {
    // Sweeping is always done concurrently, even for non-concurrent mark sweep.
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  GetHeap()->PostGcVerification(this);
  FinishPhase();
}

void MarkSweep::ProcessReferences(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      true, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}

void MarkSweep::PausePhase() {
  TimingLogger::ScopedTiming t("(Paused)PausePhase", GetTimings());
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  if (IsConcurrent()) {
    // Handle the dirty objects if we are a concurrent GC.
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Re-mark root set.
    ReMarkRoots();
    // Scan dirty objects; this catches cards dirtied by mutators during concurrent marking.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }
  {
    TimingLogger::ScopedTiming t2("SwapStacks", GetTimings());
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->SwapStacks();
    live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
    // Need to revoke all the thread-local allocation stacks since we just swapped the allocation
    // stacks and don't want anybody to allocate into the live stack.
    RevokeAllThreadLocalAllocationStacks(self);
  }
  heap_->PreSweepingGcVerification(this);
  // Disallow new system weaks to prevent a race which occurs when someone adds a new system
  // weak before we sweep them. Since this new system weak may not be marked, the GC may
  // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
  // reference to a string that is about to be swept.
  Runtime::Current()->DisallowNewSystemWeaks();
  // Enable the reference processing slow path; this needs to be done with mutators paused since
  // there is no lock in the GetReferent fast path.
  GetHeap()->GetReferenceProcessor()->EnableSlowPath();
}

void MarkSweep::PreCleanCards() {
  // Don't do this for non-concurrent GCs since they don't have any dirty cards.
  if (kPreCleanCards && IsConcurrent()) {
    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    Thread* self = Thread::Current();
    CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
    // Process dirty cards and add dirty cards to mod union tables; this also ages cards.
    heap_->ProcessCards(GetTimings(), false, true, false);
    // The checkpoint root marking is required to avoid a race condition which occurs if the
    // following happens during a reference write:
    // 1. mutator dirties the card (write barrier)
    // 2. GC ages the card (the above ProcessCards call)
    // 3. GC scans the object (the RecursiveMarkDirtyObjects call below)
    // 4. mutator writes the value (corresponding to the write barrier in 1.)
    // This causes the GC to age the card but not necessarily mark the reference which the mutator
    // wrote into the object stored in the card.
    // Having the checkpoint fixes this issue since it ensures that the card mark and the
    // reference write are visible to the GC before the card is scanned (this is due to locks being
    // acquired / released in the checkpoint code).
    // The other roots are also marked to help reduce the pause.
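    // Illustrative interleaving of steps 1-4 (the field store is hypothetical):
    //   mutator: card for holder marked dirty   (write barrier for holder->field_ = ref)
    //   GC:      card aged                      (the ProcessCards call above)
    //   GC:      holder scanned, ref not seen   (the RecursiveMarkDirtyObjects call below)
    //   mutator: store of ref into holder->field_ becomes visible
    // Without the checkpoint, ref would stay unmarked even though its card was processed.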
    MarkRootsCheckpoint(self, false);
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
    // Process the newly aged cards (ProcessCards above ages dirty cards to kCardDirty - 1).
    RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
    // TODO: Empty allocation stack to reduce the number of objects we need to test / mark as live
    // in the next GC.
  }
}

void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
  if (kUseThreadLocalAllocationStack) {
    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    heap_->RevokeAllThreadLocalAllocationStacks(self);
  }
}

void MarkSweep::MarkingPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Thread* self = Thread::Current();
  BindBitmaps();
  FindDefaultSpaceBitmap();
  // Process dirty cards and add dirty cards to mod union tables.
  // If the GC type is non-sticky, then we just clear the cards instead of aging them.
  heap_->ProcessCards(GetTimings(), false, true, GetGcType() != kGcTypeSticky);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  MarkRoots(self);
  MarkReachableObjects();
  // Pre-clean dirtied cards to reduce pauses.
  PreCleanCards();
}

void MarkSweep::UpdateAndMarkModUnion() {
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
          "UpdateAndMarkImageModUnionTable";
      TimingLogger::ScopedTiming t(name, GetTimings());
      accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
      CHECK(mod_union_table != nullptr);
      mod_union_table->UpdateAndMarkReferences(this);
    }
  }
}

void MarkSweep::MarkReachableObjects() {
  UpdateAndMarkModUnion();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Thread* const self = Thread::Current();
  // Process the references concurrently.
  ProcessReferences(self);
  SweepSystemWeaks(self);
  Runtime* const runtime = Runtime::Current();
  runtime->AllowNewSystemWeaks();
  // Clean up class loaders after system weaks are swept since that is how we know if class
  // unloading occurred.
  runtime->GetClassLinker()->CleanupClassLoaders();
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    GetHeap()->RecordFreeRevoke();
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space that we modified. This is an optimization
    // that enables us to not clear live bits inside of the sweep. Only swaps unbound bitmaps.
    SwapBitmaps();
    // Unbind the live and mark bitmaps.
    GetHeap()->UnBindBitmaps();
  }
}

void MarkSweep::FindDefaultSpaceBitmap() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::ContinuousSpaceBitmap* bitmap = space->GetMarkBitmap();
    // We want to have the main space instead of the non-moving space if possible.
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_space_bitmap_ = bitmap;
      // If this is not the non-moving space, exit the loop early since it will be good enough.
      if (space != heap_->GetNonMovingSpace()) {
        break;
      }
    }
  }
  CHECK(current_space_bitmap_ != nullptr) << "Could not find a default mark bitmap\n"
      << heap_->DumpSpaces();
}

void MarkSweep::ExpandMarkStack() {
  ResizeMarkStack(mark_stack_->Capacity() * 2);
}

void MarkSweep::ResizeMarkStack(size_t new_size) {
  // Rare case, no need to have Thread::Current be a parameter.
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<StackReference<mirror::Object>> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (auto& obj : temp) {
    mark_stack_->PushBack(obj.AsMirrorPtr());
  }
}
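
// Example of the resize protocol above (a sketch, capacities illustrative): a full stack of
// capacity 1024 triggers ExpandMarkStack, which copies the 1024 live entries aside, calls
// Resize(2048) (reallocating the backing storage, which invalidates the old Begin()/End()
// pointers), and then re-pushes the saved entries.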

mirror::Object* MarkSweep::MarkObject(mirror::Object* obj) {
  MarkObject(obj, nullptr, MemberOffset(0));
  return obj;
}

inline void MarkSweep::MarkObjectNonNullParallel(mirror::Object* obj) {
  DCHECK(obj != nullptr);
  if (MarkObjectParallel(obj)) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed onto the mark stack.
    mark_stack_->PushBack(obj);
  }
}

bool MarkSweep::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) {
  return IsMarked(ref->AsMirrorPtr());
}

class MarkSweepMarkObjectSlowPath {
 public:
  explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep, mirror::Object* holder = nullptr,
                                       MemberOffset offset = MemberOffset(0))
      : mark_sweep_(mark_sweep), holder_(holder), offset_(offset) {
  }

  void operator()(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    if (kProfileLargeObjects) {
      // TODO: Differentiate between marking and testing somehow.
      ++mark_sweep_->large_object_test_;
      ++mark_sweep_->large_object_mark_;
    }
    space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace();
    if (UNLIKELY(obj == nullptr || !IsAligned<kPageSize>(obj) ||
                 (kIsDebugBuild && large_object_space != nullptr &&
                     !large_object_space->Contains(obj)))) {
      LOG(INTERNAL_FATAL) << "Tried to mark " << obj << " not contained by any spaces";
      if (holder_ != nullptr) {
        size_t holder_size = holder_->SizeOf();
        ArtField* field = holder_->FindFieldByOffset(offset_);
        LOG(INTERNAL_FATAL) << "Field info:"
                            << " holder=" << holder_
                            << " holder is "
                            << (mark_sweep_->GetHeap()->IsLiveObjectLocked(holder_)
                                    ? "alive" : "dead")
                            << " holder_size=" << holder_size
                            << " holder_type=" << PrettyTypeOf(holder_)
                            << " offset=" << offset_.Uint32Value()
                            << " field=" << (field != nullptr ? field->GetName() : "nullptr")
                            << " field_type="
                            << (field != nullptr ? field->GetTypeDescriptor() : "")
                            << " first_ref_field_offset="
                            << (holder_->IsClass()
                                    ? holder_->AsClass()->GetFirstReferenceStaticFieldOffset(
                                        sizeof(void*))
                                    : holder_->GetClass()->GetFirstReferenceInstanceFieldOffset())
                            << " num_of_ref_fields="
                            << (holder_->IsClass()
                                    ? holder_->AsClass()->NumReferenceStaticFields()
                                    : holder_->GetClass()->NumReferenceInstanceFields())
                            << "\n";
        // Print the memory content of the holder.
        for (size_t i = 0; i < holder_size / sizeof(uint32_t); ++i) {
          uint32_t* p = reinterpret_cast<uint32_t*>(holder_);
          LOG(INTERNAL_FATAL) << &p[i] << ": " << "holder+" << (i * sizeof(uint32_t)) << " = "
                              << std::hex << p[i];
        }
      }
      PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
      MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
      {
        LOG(INTERNAL_FATAL) << "Attempting to see if it's a bad root";
        Thread* self = Thread::Current();
        if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
          mark_sweep_->VerifyRoots();
        } else {
          const bool heap_bitmap_exclusive_locked =
              Locks::heap_bitmap_lock_->IsExclusiveHeld(self);
          if (heap_bitmap_exclusive_locked) {
            Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
          }
          {
            // Note: the original declared an unnamed ScopedThreadSuspension temporary, which was
            // destroyed immediately; it must be a named local to cover the suspend-all scope.
            ScopedThreadSuspension sts(self, kSuspended);
            ScopedSuspendAll ssa(__FUNCTION__);
            mark_sweep_->VerifyRoots();
          }
          if (heap_bitmap_exclusive_locked) {
            Locks::heap_bitmap_lock_->ExclusiveLock(self);
          }
        }
      }
      LOG(FATAL) << "Can't mark invalid object";
    }
  }

 private:
  MarkSweep* const mark_sweep_;
  mirror::Object* const holder_;
  MemberOffset offset_;
};
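
// Marking fast path, summarized from MarkObjectNonNull below: immune objects are expected to be
// marked already (checked only in debug builds), objects covered by current_space_bitmap_ take a
// plain bitmap Set(), and everything else goes through mark_bitmap_->Set() with the slow-path
// visitor above, which dumps diagnostics and aborts if the address is not a valid object.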

inline void MarkSweep::MarkObjectNonNull(mirror::Object* obj, mirror::Object* holder,
                                         MemberOffset offset) {
  DCHECK(obj != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    // Verify that all objects have the correct read barrier pointer installed.
    obj->AssertReadBarrierPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    if (kCountMarkedObjects) {
      ++mark_immune_count_;
    }
    DCHECK(mark_bitmap_->Test(obj));
  } else if (LIKELY(current_space_bitmap_->HasAddress(obj))) {
    if (kCountMarkedObjects) {
      ++mark_fastpath_count_;
    }
    if (UNLIKELY(!current_space_bitmap_->Set(obj))) {
      PushOnMarkStack(obj);  // This object was not previously marked.
    }
  } else {
    if (kCountMarkedObjects) {
      ++mark_slowpath_count_;
    }
    MarkSweepMarkObjectSlowPath visitor(this, holder, offset);
    // TODO: We already know that the object is not in the current_space_bitmap_ but
    // MarkBitmap::Set will check again.
    if (!mark_bitmap_->Set(obj, visitor)) {
      PushOnMarkStack(obj);  // Was not already marked, push.
    }
  }
}

inline void MarkSweep::PushOnMarkStack(mirror::Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    // The lock is not needed but is here anyway to please annotalysis.
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    ExpandMarkStack();
  }
  // The object must be pushed onto the mark stack.
  mark_stack_->PushBack(obj);
}

inline bool MarkSweep::MarkObjectParallel(mirror::Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    // Verify that all objects have the correct read barrier pointer installed.
    obj->AssertReadBarrierPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    DCHECK(IsMarked(obj) != nullptr);
    return false;
  }
  // Try to take advantage of locality of references within a space; failing that, find the space
  // the hard way.
  accounting::ContinuousSpaceBitmap* object_bitmap = current_space_bitmap_;
  if (LIKELY(object_bitmap->HasAddress(obj))) {
    return !object_bitmap->AtomicTestAndSet(obj);
  }
  MarkSweepMarkObjectSlowPath visitor(this);
  return !mark_bitmap_->AtomicTestAndSet(obj, visitor);
}
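
// Note on the lock-free marking above: AtomicTestAndSet returns the previous value of the bit,
// so when two threads race to mark the same object exactly one of them observes 'false' and
// takes responsibility for pushing it, e.g. (sketch):
//   if (!bitmap->AtomicTestAndSet(obj)) {
//     PushOnMarkStack(obj);  // only the winning thread pushes, so obj is scanned once
//   }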

void MarkSweep::MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) {
  MarkObject(ref->AsMirrorPtr(), nullptr, MemberOffset(0));
}

// Used to mark objects when processing the mark stack. If an object is null, it is not marked.
inline void MarkSweep::MarkObject(mirror::Object* obj, mirror::Object* holder,
                                  MemberOffset offset) {
  if (obj != nullptr) {
    MarkObjectNonNull(obj, holder, offset);
  } else if (kCountMarkedObjects) {
    ++mark_null_count_;
  }
}

class VerifyRootMarkedVisitor : public SingleRootVisitor {
 public:
  explicit VerifyRootMarkedVisitor(MarkSweep* collector) : collector_(collector) { }

  void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    CHECK(collector_->IsMarked(root) != nullptr) << info.ToString();
  }

 private:
  MarkSweep* const collector_;
};

void MarkSweep::VisitRoots(mirror::Object*** roots, size_t count,
                           const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    MarkObjectNonNull(*roots[i]);
  }
}

void MarkSweep::VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                           const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    MarkObjectNonNull(roots[i]->AsMirrorPtr());
  }
}

class VerifyRootVisitor : public SingleRootVisitor {
 public:
  void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    // See if the root is on any space bitmap.
    auto* heap = Runtime::Current()->GetHeap();
    if (heap->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
      space::LargeObjectSpace* large_object_space = heap->GetLargeObjectsSpace();
      if (large_object_space != nullptr && !large_object_space->Contains(root)) {
        LOG(INTERNAL_FATAL) << "Found invalid root: " << root << " " << info;
      }
    }
  }
};

void MarkSweep::VerifyRoots() {
  VerifyRootVisitor visitor;
  Runtime::Current()->GetThreadList()->VisitRoots(&visitor);
}

void MarkSweep::MarkRoots(Thread* self) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    Runtime::Current()->VisitRoots(this);
    RevokeAllThreadLocalAllocationStacks(self);
  } else {
    MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint);
    // At this point the live stack should no longer have any mutators which push into it.
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
  }
}

void MarkSweep::MarkNonThreadRoots() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime::Current()->VisitNonThreadRoots(this);
}

void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(
      this, static_cast<VisitRootFlags>(flags | kVisitRootFlagNonMoving));
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
      : mark_sweep_(mark_sweep) {}

  void operator()(mirror::Object* obj) const ALWAYS_INLINE
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

class DelayReferenceReferentVisitor {
 public:
  explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  MarkSweep* const collector_;
};

template <bool kUseFinger = false>
class MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
                StackReference<mirror::Object>* mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != nullptr);
      // TODO: Check performance?
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  static const size_t kMaxSize = 1 * KB;

 protected:
  class MarkObjectParallelVisitor {
   public:
    ALWAYS_INLINE MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
                                            MarkSweep* mark_sweep)
        : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}

    void operator()(mirror::Object* obj, MemberOffset offset, bool /* static */) const
        ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
      Mark(obj->GetFieldObject<mirror::Object>(offset));
    }

    void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
        SHARED_REQUIRES(Locks::mutator_lock_) {
      if (!root->IsNull()) {
        VisitRoot(root);
      }
    }

    void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
        SHARED_REQUIRES(Locks::mutator_lock_) {
      if (kCheckLocks) {
        Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
        Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
      }
      Mark(root->AsMirrorPtr());
    }

   private:
    void Mark(mirror::Object* ref) const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
      if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
        if (kUseFinger) {
          std::atomic_thread_fence(std::memory_order_seq_cst);
          if (reinterpret_cast<uintptr_t>(ref) >=
              static_cast<uintptr_t>(mark_sweep_->atomic_finger_.LoadRelaxed())) {
            return;
          }
        }
        chunk_task_->MarkStackPush(ref);
      }
    }

    MarkStackTask<kUseFinger>* const chunk_task_;
    MarkSweep* const mark_sweep_;
  };

  class ScanObjectParallelVisitor {
   public:
    explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
        : chunk_task_(chunk_task) {}

    // No thread safety analysis since multiple threads will use this visitor.
    void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_)
        REQUIRES(Locks::heap_bitmap_lock_) {
      MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
      MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
      DelayReferenceReferentVisitor ref_visitor(mark_sweep);
      mark_sweep->ScanObjectVisit(obj, mark_visitor, ref_visitor);
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread-local mark stack for this task.
  StackReference<mirror::Object> mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

  ALWAYS_INLINE void MarkStackPush(mirror::Object* obj)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow: give half of the stack to the thread pool as a new work task.
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK_LT(mark_stack_pos_, kMaxSize);
    mark_stack_[mark_stack_pos_++].Assign(obj);
  }
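
  // Overflow arithmetic for MarkStackPush above: with kMaxSize == 1 * KB entries, a full local
  // stack keeps the bottom 512 entries and hands the top 512 (mark_stack_ + 512 onward) to the
  // thread pool as a fresh MarkStackTask, halving the local depth on each overflow.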

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_) {
    UNUSED(self);
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<mirror::Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      mirror::Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          mirror::Object* const mark_stack_obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr();
          DCHECK(mark_stack_obj != nullptr);
          __builtin_prefetch(mark_stack_obj);
          prefetch_fifo.push_back(mark_stack_obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr();
      }
      DCHECK(obj != nullptr);
      visitor(obj);
    }
  }
};

class CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
               accounting::ContinuousSpaceBitmap* bitmap,
               uint8_t* begin, uint8_t* end, uint8_t minimum_age, size_t mark_stack_size,
               StackReference<mirror::Object>* mark_stack_obj, bool clear_card)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age), clear_card_(clear_card) {
  }

 protected:
  accounting::ContinuousSpaceBitmap* const bitmap_;
  uint8_t* const begin_;
  uint8_t* const end_;
  const uint8_t minimum_age_;
  const bool clear_card_;

  virtual void Finalize() {
    delete this;
  }

  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    size_t cards_scanned = clear_card_ ?
        card_table->Scan<true>(bitmap_, begin_, end_, visitor, minimum_age_) :
        card_table->Scan<false>(bitmap_, begin_, end_, visitor, minimum_age_);
    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
               << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

size_t MarkSweep::GetThreadCount(bool paused) const {
  if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
    return 1;
  }
  return (paused ? heap_->GetParallelGCThreadCount() : heap_->GetConcGCThreadCount()) + 1;
}
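
// Example (values hypothetical): with a thread pool present and CareAboutPauseTimes() true, a
// paused scan with GetParallelGCThreadCount() == 3 runs with 3 + 1 = 4 workers, the +1 being the
// GC thread itself; background collections that tolerate long pauses stay single-threaded.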

void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  size_t thread_count = GetThreadCount(paused);
  // The parallel version with only one thread is faster for card scanning. TODO: fix.
  if (kParallelCardScan && thread_count > 1) {
    Thread* self = Thread::Current();
    // Can't have a different split for each space since multiple spaces can have their cards being
    // scanned at the same time.
    TimingLogger::ScopedTiming t(paused ? "(Paused)ScanGrayObjects" : __FUNCTION__,
                                 GetTimings());
    // Try to take some of the mark stack since we can pass this off to the worker tasks.
    StackReference<mirror::Object>* mark_stack_begin = mark_stack_->Begin();
    StackReference<mirror::Object>* mark_stack_end = mark_stack_->End();
    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
    // Estimated number of work tasks we will create.
    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
    DCHECK_NE(mark_stack_tasks, 0U);
    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                             mark_stack_size / mark_stack_tasks + 1);
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() == nullptr) {
        continue;
      }
      uint8_t* card_begin = space->Begin();
      uint8_t* card_end = space->End();
      // Align up the end address. For example, the image space's end
      // may not be card-size-aligned.
      card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
      DCHECK_ALIGNED(card_begin, accounting::CardTable::kCardSize);
      DCHECK_ALIGNED(card_end, accounting::CardTable::kCardSize);
      // Calculate how many bytes of heap we will scan.
      const size_t address_range = card_end - card_begin;
      // Calculate how much address range each task gets.
      const size_t card_delta = RoundUp(address_range / thread_count + 1,
                                        accounting::CardTable::kCardSize);
      // If paused and the space is neither a zygote nor an image space, we could clear the dirty
      // cards to avoid accumulating them, which would increase card scanning load in the
      // following GC cycles. We need to keep dirty cards of image space and zygote space in order
      // to track references to the other spaces.
      bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace();
      // Create the worker tasks for this space.
      while (card_begin != card_end) {
        // Add a range of cards.
        size_t addr_remaining = card_end - card_begin;
        size_t card_increment = std::min(card_delta, addr_remaining);
        // Take from the back of the mark stack.
        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
        mark_stack_end -= mark_stack_increment;
        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
        DCHECK_EQ(mark_stack_end, mark_stack_->End());
        // Add the new task to the thread pool.
        auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
                                      card_begin + card_increment, minimum_age,
                                      mark_stack_increment, mark_stack_end, clear_card);
        thread_pool->AddTask(self, task);
        card_begin += card_increment;
      }
    }
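    // Partitioning sketch (numbers hypothetical): a 256 MiB space scanned with thread_count == 4
    // yields card_delta = RoundUp(256 MiB / 4 + 1, kCardSize), so each CardScanTask covers about
    // 64 MiB of card-aligned heap plus up to mark_stack_delta entries taken off the shared mark
    // stack.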

    // Note: the card scan below may dirty new cards (and scan them)
    // as a side effect when a Reference object is encountered and
    // queued during the marking. See b/11465268.
    thread_pool->SetMaxActiveWorkers(thread_count - 1);
    thread_pool->StartWorkers(self);
    thread_pool->Wait(self, true, true);
    thread_pool->StopWorkers(self);
  } else {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() != nullptr) {
        // Image spaces are handled properly since live == marked for them.
        const char* name = nullptr;
        switch (space->GetGcRetentionPolicy()) {
          case space::kGcRetentionPolicyNeverCollect:
            name = paused ? "(Paused)ScanGrayImageSpaceObjects" : "ScanGrayImageSpaceObjects";
            break;
          case space::kGcRetentionPolicyFullCollect:
            name = paused ? "(Paused)ScanGrayZygoteSpaceObjects" : "ScanGrayZygoteSpaceObjects";
            break;
          case space::kGcRetentionPolicyAlwaysCollect:
            name = paused ? "(Paused)ScanGrayAllocSpaceObjects" : "ScanGrayAllocSpaceObjects";
            break;
          default:
            LOG(FATAL) << "Unreachable";
            UNREACHABLE();
        }
        TimingLogger::ScopedTiming t(name, GetTimings());
        ScanObjectVisitor visitor(this);
        bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace();
        if (clear_card) {
          card_table->Scan<true>(space->GetMarkBitmap(), space->Begin(), space->End(), visitor,
                                 minimum_age);
        } else {
          card_table->Scan<false>(space->GetMarkBitmap(), space->Begin(), space->End(), visitor,
                                  minimum_age);
        }
      }
    }
  }
}

class RecursiveMarkTask : public MarkStackTask<false> {
 public:
  RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
                    accounting::ContinuousSpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
      : MarkStackTask<false>(thread_pool, mark_sweep, 0, nullptr), bitmap_(bitmap), begin_(begin),
        end_(end) {
  }

 protected:
  accounting::ContinuousSpaceBitmap* const bitmap_;
  const uintptr_t begin_;
  const uintptr_t end_;

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    bitmap_->VisitMarkedRange(begin_, end_, visitor);
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

Carl Shapiro58551df2011-07-24 03:09:51 -0700955// Populates the mark stack based on the set of marked objects and
956// recursively marks until the mark stack is emptied.
Mathieu Chartier2b82db42012-11-14 17:29:05 -0800957void MarkSweep::RecursiveMark() {
Mathieu Chartierf5997b42014-06-20 10:37:54 -0700958 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Mathieu Chartiera1602f22014-01-13 17:19:19 -0800959 // RecursiveMark will build the lists of known instances of the Reference classes. See
960 // DelayReferenceReferent for details.
Mathieu Chartier94c32c52013-08-09 11:14:04 -0700961 if (kUseRecursiveMark) {
962 const bool partial = GetGcType() == kGcTypePartial;
963 ScanObjectVisitor scan_visitor(this);
964 auto* self = Thread::Current();
965 ThreadPool* thread_pool = heap_->GetThreadPool();
Mathieu Chartier2775ee42013-08-20 17:43:47 -0700966 size_t thread_count = GetThreadCount(false);
967 const bool parallel = kParallelRecursiveMark && thread_count > 1;
Mathieu Chartier94c32c52013-08-09 11:14:04 -0700968 mark_stack_->Reset();
Mathieu Chartier02e25112013-08-14 16:14:24 -0700969 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
Ian Rogers1d54e732013-05-02 21:10:01 -0700970 if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
971 (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
Mathieu Chartier0e54cd02014-03-20 12:41:23 -0700972 current_space_bitmap_ = space->GetMarkBitmap();
973 if (current_space_bitmap_ == nullptr) {
Mathieu Chartier590fee92013-09-13 13:46:47 -0700974 continue;
Mathieu Chartier2b82db42012-11-14 17:29:05 -0800975 }
Mathieu Chartier94c32c52013-08-09 11:14:04 -0700976 if (parallel) {
977  // We will use the mark stack in the future.
978 // CHECK(mark_stack_->IsEmpty());
979 // This function does not handle heap end increasing, so we must use the space end.
980 uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
981 uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
Ian Rogers3e5cf302014-05-20 16:40:37 -0700982 atomic_finger_.StoreRelaxed(AtomicInteger::MaxValue());
Mathieu Chartier94c32c52013-08-09 11:14:04 -0700983
984 // Create a few worker tasks.
Mathieu Chartier2775ee42013-08-20 17:43:47 -0700985 const size_t n = thread_count * 2;
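                                              // Each task takes 1/n of the remaining range, rounded up to a whole KB,
                                              // so chunk sizes shrink geometrically; once the computed chunk would
                                              // fall below 16 KB, the last task takes everything left. For example
                                              // (hypothetical numbers): with a 4 MB space and thread_count = 4 (so
                                              // n = 8), the first task scans 512 KB, the second 448 KB, and so on.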
Mathieu Chartier94c32c52013-08-09 11:14:04 -0700986 while (begin != end) {
987 uintptr_t start = begin;
988 uintptr_t delta = (end - begin) / n;
989 delta = RoundUp(delta, KB);
990 if (delta < 16 * KB) delta = end - begin;
991 begin += delta;
Mathieu Chartier0e54cd02014-03-20 12:41:23 -0700992 auto* task = new RecursiveMarkTask(thread_pool, this, current_space_bitmap_, start,
Mathieu Chartier94c32c52013-08-09 11:14:04 -0700993 begin);
994 thread_pool->AddTask(self, task);
995 }
Mathieu Chartier2775ee42013-08-20 17:43:47 -0700996 thread_pool->SetMaxActiveWorkers(thread_count - 1);
Mathieu Chartier94c32c52013-08-09 11:14:04 -0700997 thread_pool->StartWorkers(self);
Mathieu Chartier2775ee42013-08-20 17:43:47 -0700998 thread_pool->Wait(self, true, true);
Mathieu Chartier94c32c52013-08-09 11:14:04 -0700999 thread_pool->StopWorkers(self);
1000 } else {
1001 // This function does not handle heap end increasing, so we must use the space end.
1002 uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
1003 uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
Mathieu Chartier0e54cd02014-03-20 12:41:23 -07001004 current_space_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001005 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001006 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001007 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001008 }
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001009 ProcessMarkStack(false);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001010}
1011
Ian Rogers13735952014-10-08 12:43:28 -07001012void MarkSweep::RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age) {
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001013 ScanGrayObjects(paused, minimum_age);
1014 ProcessMarkStack(paused);
Mathieu Chartier262e5ff2012-06-01 17:35:38 -07001015}
1016
Carl Shapiro58551df2011-07-24 03:09:51 -07001017void MarkSweep::ReMarkRoots() {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07001018 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Mathieu Chartier893263b2014-03-04 11:07:42 -08001019 Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001020 Runtime::Current()->VisitRoots(this, static_cast<VisitRootFlags>(
1021 kVisitRootFlagNewRoots | kVisitRootFlagStopLoggingNewRoots | kVisitRootFlagClearRootLog));
Mathieu Chartier7bf9f192014-04-04 11:09:41 -07001022 if (kVerifyRootsMarked) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001023 TimingLogger::ScopedTiming t2("(Paused)VerifyRoots", GetTimings());
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001024 VerifyRootMarkedVisitor visitor(this);
1025 Runtime::Current()->VisitRoots(&visitor);
Mathieu Chartier893263b2014-03-04 11:07:42 -08001026 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07001027}
1028
Mathieu Chartier0f7bf6a2014-03-28 10:05:39 -07001029void MarkSweep::SweepSystemWeaks(Thread* self) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07001030 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Mathieu Chartier14c3bf92015-07-13 14:35:43 -07001031 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
Mathieu Chartier97509952015-07-13 14:35:43 -07001032 Runtime::Current()->SweepSystemWeaks(this);
Carl Shapiro58551df2011-07-24 03:09:51 -07001033}
1034
Mathieu Chartier97509952015-07-13 14:35:43 -07001035class VerifySystemWeakVisitor : public IsMarkedVisitor {
1036 public:
1037 explicit VerifySystemWeakVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001038
Mathieu Chartier97509952015-07-13 14:35:43 -07001039 virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE
Mathieu Chartier90443472015-07-16 20:32:27 -07001040 SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
Mathieu Chartier97509952015-07-13 14:35:43 -07001041 mark_sweep_->VerifyIsLive(obj);
1042 return obj;
1043 }
1044
1045 MarkSweep* const mark_sweep_;
1046};
1047
1048void MarkSweep::VerifyIsLive(const mirror::Object* obj) {
Mathieu Chartier4aeec172014-03-27 16:09:46 -07001049 if (!heap_->GetLiveBitmap()->Test(obj)) {
Mathieu Chartiercb535da2015-01-23 13:50:03 -08001050 // TODO: Consider live stack? Has this code bitrotted?
1051 CHECK(!heap_->allocation_stack_->Contains(obj))
1052 << "Found dead object " << obj << "\n" << heap_->DumpSpaces();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001053 }
1054}
1055
1056void MarkSweep::VerifySystemWeaks() {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07001057 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Mathieu Chartier6aa3df92013-09-17 15:17:28 -07001058  // Verify system weaks using a special object visitor which returns the input object.
Mathieu Chartier97509952015-07-13 14:35:43 -07001059 VerifySystemWeakVisitor visitor(this);
1060 Runtime::Current()->SweepSystemWeaks(&visitor);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001061}
1062
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001063class CheckpointMarkThreadRoots : public Closure, public RootVisitor {
Mathieu Chartier858f1c52012-10-17 17:45:55 -07001064 public:
Roland Levillain3887c462015-08-12 18:15:42 +01001065 CheckpointMarkThreadRoots(MarkSweep* mark_sweep,
1066 bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
Mathieu Chartier0f7bf6a2014-03-28 10:05:39 -07001067 : mark_sweep_(mark_sweep),
1068 revoke_ros_alloc_thread_local_buffers_at_checkpoint_(
1069 revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
1070 }
Mathieu Chartier858f1c52012-10-17 17:45:55 -07001071
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001072 void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
Mathieu Chartier90443472015-07-16 20:32:27 -07001073 OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
1074 REQUIRES(Locks::heap_bitmap_lock_) {
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001075 for (size_t i = 0; i < count; ++i) {
1076 mark_sweep_->MarkObjectNonNullParallel(*roots[i]);
1077 }
1078 }
1079
1080 void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
1081 const RootInfo& info ATTRIBUTE_UNUSED)
Mathieu Chartier90443472015-07-16 20:32:27 -07001082 OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
1083 REQUIRES(Locks::heap_bitmap_lock_) {
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001084 for (size_t i = 0; i < count; ++i) {
1085 mark_sweep_->MarkObjectNonNullParallel(roots[i]->AsMirrorPtr());
1086 }
1087 }
1088
Mathieu Chartier4aeec172014-03-27 16:09:46 -07001089 virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
Mathieu Chartier3f966702013-09-04 16:50:05 -07001090 ATRACE_BEGIN("Marking thread roots");
Mathieu Chartier858f1c52012-10-17 17:45:55 -07001091 // Note: self is not necessarily equal to thread since thread may be suspended.
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001092 Thread* const self = Thread::Current();
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001093 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
1094 << thread->GetState() << " thread " << thread << " self " << self;
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001095 thread->VisitRoots(this);
Mathieu Chartier3f966702013-09-04 16:50:05 -07001096 ATRACE_END();
Mathieu Chartier0f7bf6a2014-03-28 10:05:39 -07001097 if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) {
1098 ATRACE_BEGIN("RevokeRosAllocThreadLocalBuffers");
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001099 mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
Mathieu Chartier0f7bf6a2014-03-28 10:05:39 -07001100 ATRACE_END();
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001101 }
Lei Lidd9943d2015-02-02 14:24:44 +08001102 // If thread is a running mutator, then act on behalf of the garbage collector.
1103 // See the code in ThreadList::RunCheckpoint.
1104 if (thread->GetState() == kRunnable) {
1105 mark_sweep_->GetBarrier().Pass(self);
1106 }
Mathieu Chartier858f1c52012-10-17 17:45:55 -07001107 }
1108
1109 private:
Mathieu Chartier4aeec172014-03-27 16:09:46 -07001110 MarkSweep* const mark_sweep_;
Mathieu Chartier0f7bf6a2014-03-28 10:05:39 -07001111 const bool revoke_ros_alloc_thread_local_buffers_at_checkpoint_;
Mathieu Chartier858f1c52012-10-17 17:45:55 -07001112};
1113
Mathieu Chartier0f7bf6a2014-03-28 10:05:39 -07001114void MarkSweep::MarkRootsCheckpoint(Thread* self,
1115 bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07001116 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Mathieu Chartier0f7bf6a2014-03-28 10:05:39 -07001117 CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint);
Mathieu Chartier858f1c52012-10-17 17:45:55 -07001118 ThreadList* thread_list = Runtime::Current()->GetThreadList();
Ian Rogers1d54e732013-05-02 21:10:01 -07001119  // Request that the checkpoint be run on all threads, returning a count of the threads that
1120  // must run through the barrier, including self.
1121 size_t barrier_count = thread_list->RunCheckpoint(&check_point);
1122 // Release locks then wait for all mutator threads to pass the barrier.
Lei Lidd9943d2015-02-02 14:24:44 +08001123  // If there are no threads to wait for, all the checkpoint functions have already finished,
1124  // so there is no need to release the locks.
1125 if (barrier_count == 0) {
1126 return;
1127 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001128 Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
1129 Locks::mutator_lock_->SharedUnlock(self);
Mathieu Chartier4aeec172014-03-27 16:09:46 -07001130 {
1131 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1132 gc_barrier_->Increment(self, barrier_count);
1133 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001134 Locks::mutator_lock_->SharedLock(self);
1135 Locks::heap_bitmap_lock_->ExclusiveLock(self);
Mathieu Chartier858f1c52012-10-17 17:45:55 -07001136}
1137
Ian Rogers1d54e732013-05-02 21:10:01 -07001138void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07001139 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001140 Thread* self = Thread::Current();
Hiroshi Yamauchibbdc5bc2014-05-28 14:04:59 -07001141 mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>(
1142 sweep_array_free_buffer_mem_map_->BaseBegin());
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001143 size_t chunk_free_pos = 0;
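                                              // Unmarked objects are accumulated into chunk_free_buffer and released
                                              // in batches of up to kSweepArrayChunkFreeSize via FreeList, which
                                              // amortizes the per-call overhead of the underlying allocator.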
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07001144 ObjectBytePair freed;
1145 ObjectBytePair freed_los;
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001146 // How many objects are left in the array, modified after each space is swept.
Mathieu Chartier97509952015-07-13 14:35:43 -07001147 StackReference<mirror::Object>* objects = allocations->Begin();
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001148 size_t count = allocations->Size();
1149  // Change the order to ensure that the non-moving space is swept last, as an optimization.
1150 std::vector<space::ContinuousSpace*> sweep_spaces;
1151 space::ContinuousSpace* non_moving_space = nullptr;
1152 for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
Mathieu Chartier8d562102014-03-12 17:42:10 -07001153 if (space->IsAllocSpace() && !immune_region_.ContainsSpace(space) &&
1154 space->GetLiveBitmap() != nullptr) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001155 if (space == heap_->GetNonMovingSpace()) {
1156 non_moving_space = space;
1157 } else {
1158 sweep_spaces.push_back(space);
1159 }
1160 }
1161 }
1162  // Unlikely to sweep a significant number of non-moving objects, so we sweep them after
1163  // the other alloc spaces as an optimization.
1164 if (non_moving_space != nullptr) {
1165 sweep_spaces.push_back(non_moving_space);
1166 }
1167 // Start by sweeping the continuous spaces.
1168 for (space::ContinuousSpace* space : sweep_spaces) {
1169 space::AllocSpace* alloc_space = space->AsAllocSpace();
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07001170 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1171 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001172 if (swap_bitmaps) {
1173 std::swap(live_bitmap, mark_bitmap);
1174 }
Mathieu Chartier97509952015-07-13 14:35:43 -07001175 StackReference<mirror::Object>* out = objects;
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001176 for (size_t i = 0; i < count; ++i) {
Mathieu Chartier97509952015-07-13 14:35:43 -07001177 mirror::Object* const obj = objects[i].AsMirrorPtr();
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08001178 if (kUseThreadLocalAllocationStack && obj == nullptr) {
1179 continue;
1180 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001181 if (space->HasAddress(obj)) {
1182  // This object is in the space; remove it from the array and add it to the sweep buffer
1183  // if needed.
1184 if (!mark_bitmap->Test(obj)) {
1185 if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001186 TimingLogger::ScopedTiming t2("FreeList", GetTimings());
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07001187 freed.objects += chunk_free_pos;
1188 freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001189 chunk_free_pos = 0;
1190 }
1191 chunk_free_buffer[chunk_free_pos++] = obj;
1192 }
1193 } else {
Mathieu Chartiercb535da2015-01-23 13:50:03 -08001194 (out++)->Assign(obj);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001195 }
1196 }
1197 if (chunk_free_pos > 0) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001198 TimingLogger::ScopedTiming t2("FreeList", GetTimings());
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07001199 freed.objects += chunk_free_pos;
1200 freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001201 chunk_free_pos = 0;
1202 }
1203  // All of the references which the space contained are no longer in the allocation stack;
1204  // update the count.
1205 count = out - objects;
1206 }
1207 // Handle the large object space.
1208 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
Mathieu Chartier2dbe6272014-09-16 10:43:23 -07001209 if (large_object_space != nullptr) {
1210 accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
1211 accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
1212 if (swap_bitmaps) {
1213 std::swap(large_live_objects, large_mark_objects);
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08001214 }
Mathieu Chartier2dbe6272014-09-16 10:43:23 -07001215 for (size_t i = 0; i < count; ++i) {
Mathieu Chartier97509952015-07-13 14:35:43 -07001216 mirror::Object* const obj = objects[i].AsMirrorPtr();
Mathieu Chartier2dbe6272014-09-16 10:43:23 -07001217 // Handle large objects.
1218 if (kUseThreadLocalAllocationStack && obj == nullptr) {
1219 continue;
1220 }
1221 if (!large_mark_objects->Test(obj)) {
1222 ++freed_los.objects;
1223 freed_los.bytes += large_object_space->Free(self, obj);
1224 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001225 }
1226 }
Mathieu Chartierf5997b42014-06-20 10:37:54 -07001227 {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001228 TimingLogger::ScopedTiming t2("RecordFree", GetTimings());
Mathieu Chartierf5997b42014-06-20 10:37:54 -07001229 RecordFree(freed);
1230 RecordFreeLOS(freed_los);
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001231 t2.NewTiming("ResetStack");
Mathieu Chartierf5997b42014-06-20 10:37:54 -07001232 allocations->Reset();
1233 }
Ian Rogersc5f17732014-06-05 20:48:42 -07001234 sweep_array_free_buffer_mem_map_->MadviseDontNeedAndZero();
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001235}
1236
Ian Rogers1d54e732013-05-02 21:10:01 -07001237void MarkSweep::Sweep(bool swap_bitmaps) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07001238 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Mathieu Chartier0f7bf6a2014-03-28 10:05:39 -07001239 // Ensure that nobody inserted items in the live stack after we swapped the stacks.
1240 CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
Mathieu Chartierf5997b42014-06-20 10:37:54 -07001241 {
1242 TimingLogger::ScopedTiming t2("MarkAllocStackAsLive", GetTimings());
1243  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
1244 // knowing that new allocations won't be marked as live.
1245 accounting::ObjectStack* live_stack = heap_->GetLiveStack();
1246 heap_->MarkAllocStackAsLive(live_stack);
1247 live_stack->Reset();
1248 DCHECK(mark_stack_->IsEmpty());
1249 }
Mathieu Chartier02e25112013-08-14 16:14:24 -07001250 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
Mathieu Chartiera1602f22014-01-13 17:19:19 -08001251 if (space->IsContinuousMemMapAllocSpace()) {
1252 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
Mathieu Chartierf5997b42014-06-20 10:37:54 -07001253 TimingLogger::ScopedTiming split(
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07001254 alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", GetTimings());
1255 RecordFree(alloc_space->Sweep(swap_bitmaps));
Carl Shapiro58551df2011-07-24 03:09:51 -07001256 }
1257 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001258 SweepLargeObjects(swap_bitmaps);
Carl Shapiro58551df2011-07-24 03:09:51 -07001259}
1260
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001261void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
Mathieu Chartier2dbe6272014-09-16 10:43:23 -07001262 space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
1263 if (los != nullptr) {
1264 TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
1265 RecordFreeLOS(los->Sweep(swap_bitmaps));
1266 }
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001267}
1268
Mathieu Chartier407f7022014-02-18 14:37:05 -08001269// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
1270// marked, put it on the appropriate list in the heap for later processing.
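                                              // The referent itself is deliberately not marked here; once marking has
                                              // finished, the heap's ReferenceProcessor decides whether the referent
                                              // survived and whether the Reference should be cleared or enqueued.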
1271void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) {
Mathieu Chartier97509952015-07-13 14:35:43 -07001272 heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, this);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001273}
1274
Mathieu Chartier97509952015-07-13 14:35:43 -07001275class MarkVisitor {
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001276 public:
Mathieu Chartier97509952015-07-13 14:35:43 -07001277 explicit MarkVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {
Mathieu Chartier407f7022014-02-18 14:37:05 -08001278 }
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001279
Mathieu Chartiere4275c02015-08-06 15:34:15 -07001280 void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
Mathieu Chartier90443472015-07-16 20:32:27 -07001281 ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_)
1282 REQUIRES(Locks::heap_bitmap_lock_) {
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001283 if (kCheckLocks) {
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001284 Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
1285 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
1286 }
Hiroshi Yamauchieb2baaf2015-05-13 21:14:22 -07001287 mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset), obj, offset);
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001288 }
1289
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001290 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
1291 SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
1292 if (!root->IsNull()) {
1293 VisitRoot(root);
1294 }
1295 }
1296
1297 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
1298 SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
1299 if (kCheckLocks) {
1300 Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
1301 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
1302 }
1303 mark_sweep_->MarkObject(root->AsMirrorPtr());
1304 }
1305
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001306 private:
1307 MarkSweep* const mark_sweep_;
1308};
1309
Carl Shapiro69759ea2011-07-21 18:13:35 -07001310// Scans an object reference. Determines the type of the reference
1311// and dispatches to a specialized scanning routine.
Mathieu Chartier97509952015-07-13 14:35:43 -07001312void MarkSweep::ScanObject(mirror::Object* obj) {
1313 MarkVisitor mark_visitor(this);
Mathieu Chartier407f7022014-02-18 14:37:05 -08001314 DelayReferenceReferentVisitor ref_visitor(this);
1315 ScanObjectVisit(obj, mark_visitor, ref_visitor);
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001316}
1317
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001318void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001319 Thread* self = Thread::Current();
1320 ThreadPool* thread_pool = GetHeap()->GetThreadPool();
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001321 const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
1322 static_cast<size_t>(MarkStackTask<false>::kMaxSize));
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001323 CHECK_GT(chunk_size, 0U);
1324 // Split the current mark stack up into work tasks.
Mathieu Chartiercb535da2015-01-23 13:50:03 -08001325 for (auto* it = mark_stack_->Begin(), *end = mark_stack_->End(); it < end; ) {
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001326 const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
Mathieu Chartier0e54cd02014-03-20 12:41:23 -07001327 thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it));
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001328 it += delta;
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001329 }
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001330 thread_pool->SetMaxActiveWorkers(thread_count - 1);
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001331 thread_pool->StartWorkers(self);
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001332 thread_pool->Wait(self, true, true);
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001333 thread_pool->StopWorkers(self);
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001334 mark_stack_->Reset();
Ian Rogers3e5cf302014-05-20 16:40:37 -07001335 CHECK_EQ(work_chunks_created_.LoadSequentiallyConsistent(),
1336 work_chunks_deleted_.LoadSequentiallyConsistent())
1337 << " some of the work chunks were leaked";
Carl Shapiro69759ea2011-07-21 18:13:35 -07001338}
1339
Ian Rogers5d76c432011-10-31 21:42:49 -07001340// Scan anything that's on the mark stack.
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001341void MarkSweep::ProcessMarkStack(bool paused) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07001342 TimingLogger::ScopedTiming t(paused ? "(Paused)ProcessMarkStack" : __FUNCTION__, GetTimings());
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001343 size_t thread_count = GetThreadCount(paused);
1344 if (kParallelProcessMarkStack && thread_count > 1 &&
1345 mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
1346 ProcessMarkStackParallel(thread_count);
Mathieu Chartierd8195f12012-10-05 12:21:28 -07001347 } else {
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001348 // TODO: Tune this.
1349 static const size_t kFifoSize = 4;
Mathieu Chartier97509952015-07-13 14:35:43 -07001350 BoundedFifoPowerOfTwo<mirror::Object*, kFifoSize> prefetch_fifo;
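                                              // The FIFO acts as a small prefetch window: objects popped from the
                                              // mark stack are prefetched with __builtin_prefetch as they enter the
                                              // window, so by the time one reaches the front and is passed to
                                              // ScanObject its memory is likely already in cache.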
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001351 for (;;) {
Mathieu Chartier97509952015-07-13 14:35:43 -07001352 mirror::Object* obj = nullptr;
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001353 if (kUseMarkStackPrefetch) {
1354 while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
Mathieu Chartier97509952015-07-13 14:35:43 -07001355 mirror::Object* mark_stack_obj = mark_stack_->PopBack();
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001356 DCHECK(mark_stack_obj != nullptr);
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001357 __builtin_prefetch(mark_stack_obj);
1358 prefetch_fifo.push_back(mark_stack_obj);
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001359 }
1360 if (prefetch_fifo.empty()) {
1361 break;
1362 }
1363 obj = prefetch_fifo.front();
1364 prefetch_fifo.pop_front();
1365 } else {
1366 if (mark_stack_->IsEmpty()) {
1367 break;
1368 }
1369 obj = mark_stack_->PopBack();
1370 }
Mathieu Chartier4aeec172014-03-27 16:09:46 -07001371 DCHECK(obj != nullptr);
Mathieu Chartierd8195f12012-10-05 12:21:28 -07001372 ScanObject(obj);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001373 }
1374 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07001375}
1376
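// Returns the object if it is marked, null otherwise. Immune-region objects are treated as
// always marked; the current space's bitmap is checked first as the common case before
// falling back to the heap-wide mark bitmap.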
Mathieu Chartier97509952015-07-13 14:35:43 -07001377inline mirror::Object* MarkSweep::IsMarked(mirror::Object* object) {
Mathieu Chartier8d562102014-03-12 17:42:10 -07001378 if (immune_region_.ContainsObject(object)) {
Mathieu Chartier97509952015-07-13 14:35:43 -07001379 return object;
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001380 }
Mathieu Chartier0e54cd02014-03-20 12:41:23 -07001381 if (current_space_bitmap_->HasAddress(object)) {
Mathieu Chartier97509952015-07-13 14:35:43 -07001382 return current_space_bitmap_->Test(object) ? object : nullptr;
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001383 }
Mathieu Chartier97509952015-07-13 14:35:43 -07001384 return mark_bitmap_->Test(object) ? object : nullptr;
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001385}
1386
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001387void MarkSweep::FinishPhase() {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07001388 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001389 if (kCountScannedTypes) {
Mathieu Chartier52a7f5c2015-08-18 18:35:52 -07001390 VLOG(gc)
1391 << "MarkSweep scanned"
1392 << " no reference objects=" << no_reference_class_count_.LoadRelaxed()
1393 << " normal objects=" << normal_count_.LoadRelaxed()
1394 << " classes=" << class_count_.LoadRelaxed()
1395 << " object arrays=" << object_array_count_.LoadRelaxed()
1396 << " references=" << reference_count_.LoadRelaxed()
1397 << " other=" << other_count_.LoadRelaxed();
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001398 }
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001399 if (kCountTasks) {
Ian Rogers3e5cf302014-05-20 16:40:37 -07001400 VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_.LoadRelaxed();
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001401 }
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001402 if (kMeasureOverhead) {
Ian Rogers3e5cf302014-05-20 16:40:37 -07001403 VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_.LoadRelaxed());
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001404 }
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001405 if (kProfileLargeObjects) {
Ian Rogers3e5cf302014-05-20 16:40:37 -07001406 VLOG(gc) << "Large objects tested " << large_object_test_.LoadRelaxed()
1407 << " marked " << large_object_mark_.LoadRelaxed();
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001408 }
Mathieu Chartier0e54cd02014-03-20 12:41:23 -07001409 if (kCountMarkedObjects) {
Ian Rogers3e5cf302014-05-20 16:40:37 -07001410 VLOG(gc) << "Marked: null=" << mark_null_count_.LoadRelaxed()
1411 << " immune=" << mark_immune_count_.LoadRelaxed()
1412 << " fastpath=" << mark_fastpath_count_.LoadRelaxed()
1413 << " slowpath=" << mark_slowpath_count_.LoadRelaxed();
Mathieu Chartier0e54cd02014-03-20 12:41:23 -07001414 }
Mathieu Chartier4aeec172014-03-27 16:09:46 -07001415 CHECK(mark_stack_->IsEmpty()); // Ensure that the mark stack is empty.
Mathieu Chartier5301cd22012-05-31 12:11:36 -07001416 mark_stack_->Reset();
Mathieu Chartier4aeec172014-03-27 16:09:46 -07001417 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1418 heap_->ClearMarkedObjects();
Carl Shapiro69759ea2011-07-21 18:13:35 -07001419}
1420
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001421void MarkSweep::RevokeAllThreadLocalBuffers() {
1422 if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) {
1423 // If concurrent, rosalloc thread-local buffers are revoked at the
1424 // thread checkpoint. Bump pointer space thread-local buffers must
1425 // not be in use.
1426 GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
1427 } else {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07001428 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001429 GetHeap()->RevokeAllThreadLocalBuffers();
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001430 }
1431}
1432
Ian Rogers1d54e732013-05-02 21:10:01 -07001433} // namespace collector
1434} // namespace gc
Carl Shapiro69759ea2011-07-21 18:13:35 -07001435} // namespace art