/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#define ATRACE_TAG ATRACE_TAG_DALVIK
#include "cutils/trace.h"

#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "mark_sweep-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"

using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

// Performance options.
static constexpr bool kUseRecursiveMark = false;
static constexpr bool kUseMarkStackPrefetch = true;
static constexpr size_t kSweepArrayChunkFreeSize = 1024;
static constexpr bool kPreCleanCards = true;

// Parallelism options.
static constexpr bool kParallelCardScan = true;
static constexpr bool kParallelRecursiveMark = true;
// Don't attempt to parallelize mark stack processing unless the mark stack is at least n
// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc. Not
// having this can add overhead in ProcessReferences since we may end up doing many calls of
// ProcessMarkStack with very small mark stacks.
static constexpr size_t kMinimumParallelMarkStackSize = 128;
static constexpr bool kParallelProcessMarkStack = true;

// Profiling and information flags.
static constexpr bool kProfileLargeObjects = false;
static constexpr bool kMeasureOverhead = false;
static constexpr bool kCountTasks = false;
static constexpr bool kCountJavaLangRefs = false;
static constexpr bool kCountMarkedObjects = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
static constexpr bool kCheckLocks = kDebugLocking;
static constexpr bool kVerifyRootsMarked = kIsDebugBuild;

// If true, revoke the rosalloc thread-local buffers at the checkpoint, as opposed to during the
// pause.
static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true;
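// Presumed rationale (not stated here): revoking at the checkpoint lets each thread release its
// buffers while the world is still mostly running, shortening the stop-the-world pause at the
// cost of a little extra checkpoint work.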

void MarkSweep::BindBitmaps() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
    }
  }
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix +
                       (is_concurrent ? "concurrent mark sweep" : "mark sweep")),
      current_space_bitmap_(nullptr), mark_bitmap_(nullptr), mark_stack_(nullptr),
      gc_barrier_(new Barrier(0)),
      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
      is_concurrent_(is_concurrent), live_stack_freeze_size_(0) {
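  // Note: judging by the sizing below, this page-aligned anonymous mapping is a scratch buffer
  // holding up to kSweepArrayChunkFreeSize object pointers, presumably used to batch frees while
  // sweeping the allocation stack.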
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous(
      "mark sweep sweep array free buffer", nullptr,
      RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
      PROT_READ | PROT_WRITE, false, false, &error_msg);
  CHECK(mem_map != nullptr) << "Couldn't allocate sweep array free buffer: " << error_msg;
  sweep_array_free_buffer_mem_map_.reset(mem_map);
}

void MarkSweep::InitializePhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  mark_stack_ = heap_->GetMarkStack();
  DCHECK(mark_stack_ != nullptr);
  immune_region_.Reset();
  class_count_.StoreRelaxed(0);
  array_count_.StoreRelaxed(0);
  other_count_.StoreRelaxed(0);
  large_object_test_.StoreRelaxed(0);
  large_object_mark_.StoreRelaxed(0);
  overhead_time_.StoreRelaxed(0);
  work_chunks_created_.StoreRelaxed(0);
  work_chunks_deleted_.StoreRelaxed(0);
  reference_count_.StoreRelaxed(0);
  mark_null_count_.StoreRelaxed(0);
  mark_immune_count_.StoreRelaxed(0);
  mark_fastpath_count_.StoreRelaxed(0);
  mark_slowpath_count_.StoreRelaxed(0);
  {
    // TODO: I don't think we should need the heap bitmap lock to get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }
  if (!GetCurrentIteration()->GetClearSoftReferences()) {
    // Always clear soft references for non-sticky collections.
    GetCurrentIteration()->SetClearSoftReferences(GetGcType() != collector::kGcTypeSticky);
  }
}

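// Overview of the phase structure below: a concurrent collection marks while holding the mutator
// lock for reading, then pauses briefly to re-mark roots and scan dirty cards; a non-concurrent
// collection does all of its marking inside the pause. Sweeping runs concurrently either way.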
void MarkSweep::RunPhases() {
  Thread* self = Thread::Current();
  InitializePhase();
  Locks::mutator_lock_->AssertNotHeld(self);
  if (IsConcurrent()) {
    GetHeap()->PreGcVerification(this);
    {
      ReaderMutexLock mu(self, *Locks::mutator_lock_);
      MarkingPhase();
    }
    ScopedPause pause(this);
    GetHeap()->PrePauseRosAllocVerification(this);
    PausePhase();
    RevokeAllThreadLocalBuffers();
  } else {
    ScopedPause pause(this);
    GetHeap()->PreGcVerificationPaused(this);
    MarkingPhase();
    GetHeap()->PrePauseRosAllocVerification(this);
    PausePhase();
    RevokeAllThreadLocalBuffers();
  }
  {
    // Sweeping is always done concurrently, even for non-concurrent mark sweep.
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  GetHeap()->PostGcVerification(this);
  FinishPhase();
}

void MarkSweep::ProcessReferences(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      true, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
      &HeapReferenceMarkedCallback, &MarkObjectCallback, &ProcessMarkStackCallback, this);
}

void MarkSweep::PausePhase() {
  TimingLogger::ScopedTiming t("(Paused)PausePhase", GetTimings());
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  if (IsConcurrent()) {
    // Handle the dirty objects if we are a concurrent GC.
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Re-mark root set.
    ReMarkRoots();
    // Scan dirty objects; this catches objects that mutators dirtied during concurrent marking.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }
  {
    TimingLogger::ScopedTiming t2("SwapStacks", GetTimings());
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->SwapStacks(self);
    live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
    // Need to revoke all the thread-local allocation stacks since we just swapped the allocation
    // stacks and don't want anybody to allocate into the live stack.
    RevokeAllThreadLocalAllocationStacks(self);
  }
  heap_->PreSweepingGcVerification(this);
  // Disallow new system weaks to prevent a race which occurs when someone adds a new system
  // weak before we sweep them. Since this new system weak may not be marked, the GC may
  // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
  // reference to a string that is about to be swept.
  Runtime::Current()->DisallowNewSystemWeaks();
  // Enable the reference processing slow path; this needs to be done with mutators paused since
  // there is no lock in the GetReferent fast path.
  GetHeap()->GetReferenceProcessor()->EnableSlowPath();
}

void MarkSweep::PreCleanCards() {
  // Don't do this for non-concurrent GCs since they don't have any dirty cards.
  if (kPreCleanCards && IsConcurrent()) {
    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    Thread* self = Thread::Current();
    CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
    // Process dirty cards and add dirty cards to mod union tables; this also ages cards.
    heap_->ProcessCards(GetTimings(), false, true, false);
    // The checkpoint root marking is required to avoid a race condition which occurs if the
    // following happens during a reference write:
    // 1. mutator dirties the card (write barrier)
    // 2. GC ages the card (the above ProcessCards call)
    // 3. GC scans the object (the RecursiveMarkDirtyObjects call below)
    // 4. mutator writes the value (corresponding to the write barrier in 1.)
    // This causes the GC to age the card but not necessarily mark the reference which the mutator
    // wrote into the object stored in the card.
    // Having the checkpoint fixes this issue since it ensures that the card mark and the
    // reference write are visible to the GC before the card is scanned (this is due to locks being
    // acquired / released in the checkpoint code).
    // The other roots are also marked to help reduce the pause.
    MarkRootsCheckpoint(self, false);
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
    // Process the newly aged cards.
    RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
    // TODO: Empty the allocation stack to reduce the number of objects we need to test / mark
    // as live in the next GC.
  }
}

void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
  if (kUseThreadLocalAllocationStack) {
    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    heap_->RevokeAllThreadLocalAllocationStacks(self);
  }
}

void MarkSweep::MarkingPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Thread* self = Thread::Current();
  BindBitmaps();
  FindDefaultSpaceBitmap();
  // Process dirty cards and add dirty cards to mod union tables.
  // If the GC type is non-sticky, then we just clear the cards instead of ageing them.
  heap_->ProcessCards(GetTimings(), false, true, GetGcType() != kGcTypeSticky);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  MarkRoots(self);
  MarkReachableObjects();
  // Pre-clean dirtied cards to reduce pauses.
  PreCleanCards();
}

void MarkSweep::UpdateAndMarkModUnion() {
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
          "UpdateAndMarkImageModUnionTable";
      TimingLogger::ScopedTiming t(name, GetTimings());
      accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
      CHECK(mod_union_table != nullptr);
      mod_union_table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
    }
  }
}

void MarkSweep::MarkReachableObjects() {
  UpdateAndMarkModUnion();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Thread* self = Thread::Current();
  // Process the references concurrently.
  ProcessReferences(self);
  SweepSystemWeaks(self);
  Runtime::Current()->AllowNewSystemWeaks();
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    GetHeap()->RecordFreeRevoke();
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space which we modified. This is an optimization
    // that enables us to not clear live bits inside of the sweep. Only swaps unbound bitmaps.
    SwapBitmaps();
    // Unbind the live and mark bitmaps.
    GetHeap()->UnBindBitmaps();
  }
}

void MarkSweep::FindDefaultSpaceBitmap() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::ContinuousSpaceBitmap* bitmap = space->GetMarkBitmap();
    // We want to have the main space instead of the non-moving space if possible.
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_space_bitmap_ = bitmap;
      // If we are not the non-moving space, exit the loop early since this will be good enough.
      if (space != heap_->GetNonMovingSpace()) {
        break;
      }
    }
  }
  CHECK(current_space_bitmap_ != nullptr) << "Could not find a default mark bitmap\n"
      << heap_->DumpSpaces();
}

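// Note on the mark stack sizing below: the stack grows geometrically (capacity doubles on
// overflow), and resizing re-pushes the saved contents since Resize may reallocate the backing
// storage.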
void MarkSweep::ExpandMarkStack() {
  ResizeMarkStack(mark_stack_->Capacity() * 2);
}

void MarkSweep::ResizeMarkStack(size_t new_size) {
  // Rare case, no need to have Thread::Current be a parameter.
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<StackReference<Object>> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (auto& obj : temp) {
    mark_stack_->PushBack(obj.AsMirrorPtr());
  }
}

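// Note: unlike PushOnMarkStack, the parallel variant below takes mark_stack_lock_ because
// multiple checkpoint / worker threads may push onto the shared mark stack concurrently.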
inline void MarkSweep::MarkObjectNonNullParallel(Object* obj) {
  DCHECK(obj != nullptr);
  if (MarkObjectParallel(obj)) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(obj);
  }
}

mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* obj, void* arg) {
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObject(obj);
  return obj;
}

void MarkSweep::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObject(ref->AsMirrorPtr());
}

bool MarkSweep::HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) {
  return reinterpret_cast<MarkSweep*>(arg)->IsMarked(ref->AsMirrorPtr());
}

class MarkSweepMarkObjectSlowPath {
 public:
  explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {
  }

  void operator()(const Object* obj) const ALWAYS_INLINE {
    if (kProfileLargeObjects) {
      // TODO: Differentiate between marking and testing somehow.
      ++mark_sweep_->large_object_test_;
      ++mark_sweep_->large_object_mark_;
    }
    space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace();
    if (UNLIKELY(obj == nullptr || !IsAligned<kPageSize>(obj) ||
                 (kIsDebugBuild && large_object_space != nullptr &&
                  !large_object_space->Contains(obj)))) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      mark_sweep_->VerifyRoots();
      LOG(FATAL) << "Can't mark invalid object";
    }
  }

 private:
  MarkSweep* const mark_sweep_;
};

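// Summary of the fast paths below: immune objects are only sanity-checked (they are already
// marked), objects covered by current_space_bitmap_ take a single bitmap Set, and everything
// else falls back to the heap-wide mark bitmap, with the slow path above catching invalid
// pointers.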
inline void MarkSweep::MarkObjectNonNull(Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    // Verify all the objects have the correct pointer installed.
    obj->AssertReadBarrierPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    if (kCountMarkedObjects) {
      ++mark_immune_count_;
    }
    DCHECK(mark_bitmap_->Test(obj));
  } else if (LIKELY(current_space_bitmap_->HasAddress(obj))) {
    if (kCountMarkedObjects) {
      ++mark_fastpath_count_;
    }
    if (UNLIKELY(!current_space_bitmap_->Set(obj))) {
      PushOnMarkStack(obj);  // This object was not previously marked.
    }
  } else {
    if (kCountMarkedObjects) {
      ++mark_slowpath_count_;
    }
    MarkSweepMarkObjectSlowPath visitor(this);
    // TODO: We already know that the object is not in the current_space_bitmap_ but
    // MarkBitmap::Set will check again.
    if (!mark_bitmap_->Set(obj, visitor)) {
      PushOnMarkStack(obj);  // Was not already marked, push.
    }
  }
}

inline void MarkSweep::PushOnMarkStack(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    // The lock is not needed but is here anyway to please annotalysis.
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    ExpandMarkStack();
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

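// Note: the atomic test-and-set below is the lock-free core of parallel marking; it returns true
// only for the thread that actually flips the bit, so each newly marked object is pushed by
// exactly one worker.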
inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    // Verify all the objects have the correct pointer installed.
    obj->AssertReadBarrierPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    DCHECK(IsMarked(obj));
    return false;
  }
  // Try to take advantage of locality of references within a space; failing this, find the space
  // the hard way.
  accounting::ContinuousSpaceBitmap* object_bitmap = current_space_bitmap_;
  if (LIKELY(object_bitmap->HasAddress(obj))) {
    return !object_bitmap->AtomicTestAndSet(obj);
  }
  MarkSweepMarkObjectSlowPath visitor(this);
  return !mark_bitmap_->AtomicTestAndSet(obj, visitor);
}

// Used to mark objects when processing the mark stack. If an object is null, it is not marked.
inline void MarkSweep::MarkObject(Object* obj) {
  if (obj != nullptr) {
    MarkObjectNonNull(obj);
  } else if (kCountMarkedObjects) {
    ++mark_null_count_;
  }
}

class VerifyRootMarkedVisitor : public SingleRootVisitor {
 public:
  explicit VerifyRootMarkedVisitor(MarkSweep* collector) : collector_(collector) { }

  void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    CHECK(collector_->IsMarked(root)) << info.ToString();
  }

 private:
  MarkSweep* const collector_;
};

void MarkSweep::VisitRoots(mirror::Object*** roots, size_t count,
                           const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    MarkObjectNonNull(*roots[i]);
  }
}

void MarkSweep::VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                           const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    MarkObjectNonNull(roots[i]->AsMirrorPtr());
  }
}

class VerifyRootVisitor : public SingleRootVisitor {
 public:
  void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    // See if the root is on any space bitmap.
    auto* heap = Runtime::Current()->GetHeap();
    if (heap->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
      space::LargeObjectSpace* large_object_space = heap->GetLargeObjectsSpace();
      if (large_object_space != nullptr && !large_object_space->Contains(root)) {
        LOG(ERROR) << "Found invalid root: " << root << " " << info;
      }
    }
  }
};

void MarkSweep::VerifyRoots() {
  VerifyRootVisitor visitor;
  Runtime::Current()->GetThreadList()->VisitRoots(&visitor);
}

void MarkSweep::MarkRoots(Thread* self) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    Runtime::Current()->VisitRoots(this);
    RevokeAllThreadLocalAllocationStacks(self);
  } else {
    MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint);
    // At this point the live stack should no longer have any mutators which push into it.
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
  }
}

void MarkSweep::MarkNonThreadRoots() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime::Current()->VisitNonThreadRoots(this);
}

void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(this, flags);
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
      : mark_sweep_(mark_sweep) {}

  void operator()(Object* obj) const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

class DelayReferenceReferentVisitor {
 public:
  explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  MarkSweep* const collector_;
};

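// Summary of MarkStackTask below: each task owns a fixed-size local stack of up to kMaxSize
// references; when the local stack fills, MarkStackPush gives half of it to the thread pool as a
// new task, which is how marking work gets redistributed across workers.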
template <bool kUseFinger = false>
class MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
                StackReference<Object>* mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != nullptr);
      // TODO: Check performance?
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  static const size_t kMaxSize = 1 * KB;

 protected:
  class MarkObjectParallelVisitor {
   public:
    explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
                                       MarkSweep* mark_sweep) ALWAYS_INLINE
        : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}

    void operator()(Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
      if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
        if (kUseFinger) {
          android_memory_barrier();
          if (reinterpret_cast<uintptr_t>(ref) >=
              static_cast<uintptr_t>(mark_sweep_->atomic_finger_.LoadRelaxed())) {
            return;
          }
        }
        chunk_task_->MarkStackPush(ref);
      }
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
    MarkSweep* const mark_sweep_;
  };

  class ScanObjectParallelVisitor {
   public:
    explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
        : chunk_task_(chunk_task) {}

    // No thread safety analysis since multiple threads will use this visitor.
    void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
      MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
      MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
      DelayReferenceReferentVisitor ref_visitor(mark_sweep);
      mark_sweep->ScanObjectVisit(obj, mark_visitor, ref_visitor);
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread local mark stack for this task.
  StackReference<Object> mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

  ALWAYS_INLINE void MarkStackPush(Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK_LT(mark_stack_pos_, kMaxSize);
    mark_stack_[mark_stack_pos_++].Assign(obj);
  }

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    UNUSED(self);
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          Object* const mark_stack_obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr();
          DCHECK(mark_stack_obj != nullptr);
          __builtin_prefetch(mark_stack_obj);
          prefetch_fifo.push_back(mark_stack_obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr();
      }
      DCHECK(obj != nullptr);
      visitor(obj);
    }
  }
};

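// Summary of CardScanTask below: each task scans the cards covering [begin_, end_) of one
// space's bitmap (optionally clearing cards as it goes), then drains the slice of the mark stack
// it inherited via MarkStackTask::Run.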
class CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
               accounting::ContinuousSpaceBitmap* bitmap,
               uint8_t* begin, uint8_t* end, uint8_t minimum_age, size_t mark_stack_size,
               StackReference<Object>* mark_stack_obj, bool clear_card)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age), clear_card_(clear_card) {
  }

 protected:
  accounting::ContinuousSpaceBitmap* const bitmap_;
  uint8_t* const begin_;
  uint8_t* const end_;
  const uint8_t minimum_age_;
  const bool clear_card_;

  virtual void Finalize() {
    delete this;
  }

  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    size_t cards_scanned = clear_card_ ?
        card_table->Scan<true>(bitmap_, begin_, end_, visitor, minimum_age_) :
        card_table->Scan<false>(bitmap_, begin_, end_, visitor, minimum_age_);
    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
        << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

size_t MarkSweep::GetThreadCount(bool paused) const {
  if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
    return 1;
  }
  return (paused ? heap_->GetParallelGCThreadCount() : heap_->GetConcGCThreadCount()) + 1;
}

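// Summary of ScanGrayObjects below: in the parallel case, each space's card range is split into
// card-aligned chunks with one CardScanTask per chunk, and the existing mark stack is divided
// among the tasks so workers start out with useful work; otherwise each space is scanned
// serially.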
void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  size_t thread_count = GetThreadCount(paused);
  // The parallel version with only one thread is faster for card scanning. TODO: fix.
  if (kParallelCardScan && thread_count > 1) {
    Thread* self = Thread::Current();
    // Can't have a different split for each space since multiple spaces can have their cards
    // being scanned at the same time.
    TimingLogger::ScopedTiming t(paused ? "(Paused)ScanGrayObjects" : __FUNCTION__,
                                 GetTimings());
    // Try to take some of the mark stack since we can pass this off to the worker tasks.
    StackReference<Object>* mark_stack_begin = mark_stack_->Begin();
    StackReference<Object>* mark_stack_end = mark_stack_->End();
    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
    // Estimated number of work tasks we will create.
    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
    DCHECK_NE(mark_stack_tasks, 0U);
    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                             mark_stack_size / mark_stack_tasks + 1);
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() == nullptr) {
        continue;
      }
      uint8_t* card_begin = space->Begin();
      uint8_t* card_end = space->End();
      // Align up the end address. For example, the image space's end
      // may not be card-size-aligned.
      card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin));
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end));
      // Calculate how many bytes of heap we will scan.
      const size_t address_range = card_end - card_begin;
      // Calculate how much address range each task gets.
      const size_t card_delta = RoundUp(address_range / thread_count + 1,
                                        accounting::CardTable::kCardSize);
      // If paused and the space is neither zygote nor image space, we could clear the dirty
      // cards to avoid accumulating them to increase card scanning load in the following GC
      // cycles. We need to keep dirty cards of image space and zygote space in order to track
      // references to the other spaces.
      bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace();
      // Create the worker tasks for this space.
      while (card_begin != card_end) {
        // Add a range of cards.
        size_t addr_remaining = card_end - card_begin;
        size_t card_increment = std::min(card_delta, addr_remaining);
        // Take from the back of the mark stack.
        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
        mark_stack_end -= mark_stack_increment;
        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
        DCHECK_EQ(mark_stack_end, mark_stack_->End());
        // Add the new task to the thread pool.
        auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
                                      card_begin + card_increment, minimum_age,
                                      mark_stack_increment, mark_stack_end, clear_card);
        thread_pool->AddTask(self, task);
        card_begin += card_increment;
      }
    }

    // Note: the card scan below may dirty new cards (and scan them)
    // as a side effect when a Reference object is encountered and
    // queued during the marking. See b/11465268.
    thread_pool->SetMaxActiveWorkers(thread_count - 1);
    thread_pool->StartWorkers(self);
    thread_pool->Wait(self, true, true);
    thread_pool->StopWorkers(self);
  } else {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() != nullptr) {
        // Image spaces are handled properly since live == marked for them.
        const char* name = nullptr;
        switch (space->GetGcRetentionPolicy()) {
          case space::kGcRetentionPolicyNeverCollect:
            name = paused ? "(Paused)ScanGrayImageSpaceObjects" : "ScanGrayImageSpaceObjects";
            break;
          case space::kGcRetentionPolicyFullCollect:
            name = paused ? "(Paused)ScanGrayZygoteSpaceObjects" : "ScanGrayZygoteSpaceObjects";
            break;
          case space::kGcRetentionPolicyAlwaysCollect:
            name = paused ? "(Paused)ScanGrayAllocSpaceObjects" : "ScanGrayAllocSpaceObjects";
            break;
          default:
            LOG(FATAL) << "Unreachable";
            UNREACHABLE();
        }
        TimingLogger::ScopedTiming t(name, GetTimings());
        ScanObjectVisitor visitor(this);
        bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace();
        if (clear_card) {
          card_table->Scan<true>(space->GetMarkBitmap(), space->Begin(), space->End(), visitor,
                                 minimum_age);
        } else {
          card_table->Scan<false>(space->GetMarkBitmap(), space->Begin(), space->End(), visitor,
                                  minimum_age);
        }
      }
    }
  }
}

class RecursiveMarkTask : public MarkStackTask<false> {
 public:
  RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
                    accounting::ContinuousSpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
      : MarkStackTask<false>(thread_pool, mark_sweep, 0, nullptr), bitmap_(bitmap), begin_(begin),
        end_(end) {
  }

 protected:
  accounting::ContinuousSpaceBitmap* const bitmap_;
  const uintptr_t begin_;
  const uintptr_t end_;

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    bitmap_->VisitMarkedRange(begin_, end_, visitor);
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

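// Note: kUseRecursiveMark is false above, so in practice RecursiveMark skips the bitmap walk
// below and only drains the mark stack via ProcessMarkStack.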
// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // RecursiveMark will build the lists of known instances of the Reference classes. See
  // DelayReferenceReferent for details.
  if (kUseRecursiveMark) {
    const bool partial = GetGcType() == kGcTypePartial;
    ScanObjectVisitor scan_visitor(this);
    auto* self = Thread::Current();
    ThreadPool* thread_pool = heap_->GetThreadPool();
    size_t thread_count = GetThreadCount(false);
    const bool parallel = kParallelRecursiveMark && thread_count > 1;
    mark_stack_->Reset();
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_space_bitmap_ = space->GetMarkBitmap();
        if (current_space_bitmap_ == nullptr) {
          continue;
        }
        if (parallel) {
          // We will use the mark stack in the future.
          // CHECK(mark_stack_->IsEmpty());
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          atomic_finger_.StoreRelaxed(AtomicInteger::MaxValue());

          // Create a few worker tasks.
          const size_t n = thread_count * 2;
          while (begin != end) {
            uintptr_t start = begin;
            uintptr_t delta = (end - begin) / n;
            delta = RoundUp(delta, KB);
            if (delta < 16 * KB) delta = end - begin;
            begin += delta;
            auto* task = new RecursiveMarkTask(thread_pool, this, current_space_bitmap_, start,
                                               begin);
            thread_pool->AddTask(self, task);
          }
          thread_pool->SetMaxActiveWorkers(thread_count - 1);
          thread_pool->StartWorkers(self);
          thread_pool->Wait(self, true, true);
          thread_pool->StopWorkers(self);
        } else {
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          current_space_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
        }
      }
    }
  }
  ProcessMarkStack(false);
}

mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) {
  if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
    return object;
  }
  return nullptr;
}

void MarkSweep::RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age) {
  ScanGrayObjects(paused, minimum_age);
  ProcessMarkStack(paused);
}

void MarkSweep::ReMarkRoots() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  Runtime::Current()->VisitRoots(this, static_cast<VisitRootFlags>(
      kVisitRootFlagNewRoots | kVisitRootFlagStopLoggingNewRoots | kVisitRootFlagClearRootLog));
  if (kVerifyRootsMarked) {
    TimingLogger::ScopedTiming t2("(Paused)VerifyRoots", GetTimings());
    VerifyRootMarkedVisitor visitor(this);
    Runtime::Current()->VisitRoots(&visitor);
  }
}

void MarkSweep::SweepSystemWeaks(Thread* self) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
}

Mathieu Chartier6aa3df92013-09-17 15:17:28 -0700961mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
Mathieu Chartierc7b83a02012-09-11 18:07:39 -0700962 reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
963 // We don't actually want to sweep the object, so lets return "marked"
Mathieu Chartier6aa3df92013-09-17 15:17:28 -0700964 return obj;
Mathieu Chartierc7b83a02012-09-11 18:07:39 -0700965}
966
967void MarkSweep::VerifyIsLive(const Object* obj) {
Mathieu Chartier4aeec172014-03-27 16:09:46 -0700968 if (!heap_->GetLiveBitmap()->Test(obj)) {
Mathieu Chartiercb535da2015-01-23 13:50:03 -0800969 // TODO: Consider live stack? Has this code bitrotted?
970 CHECK(!heap_->allocation_stack_->Contains(obj))
971 << "Found dead object " << obj << "\n" << heap_->DumpSpaces();
Mathieu Chartierc7b83a02012-09-11 18:07:39 -0700972 }
973}
974
975void MarkSweep::VerifySystemWeaks() {
Mathieu Chartierf5997b42014-06-20 10:37:54 -0700976 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Mathieu Chartier6aa3df92013-09-17 15:17:28 -0700977 // Verify system weaks using a special object visitor which returns the input object.
978 Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
Mathieu Chartierc7b83a02012-09-11 18:07:39 -0700979}
980
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -0700981class CheckpointMarkThreadRoots : public Closure, public RootVisitor {
Mathieu Chartier858f1c52012-10-17 17:45:55 -0700982 public:
Mathieu Chartier0f7bf6a2014-03-28 10:05:39 -0700983 explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep,
984 bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
985 : mark_sweep_(mark_sweep),
986 revoke_ros_alloc_thread_local_buffers_at_checkpoint_(
987 revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
988 }
Mathieu Chartier858f1c52012-10-17 17:45:55 -0700989
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -0700990 void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
991 OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
992 EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
993 for (size_t i = 0; i < count; ++i) {
994 mark_sweep_->MarkObjectNonNullParallel(*roots[i]);
995 }
996 }
997
998 void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
999 const RootInfo& info ATTRIBUTE_UNUSED)
1000 OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1001 EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
1002 for (size_t i = 0; i < count; ++i) {
1003 mark_sweep_->MarkObjectNonNullParallel(roots[i]->AsMirrorPtr());
1004 }
1005 }
1006
Mathieu Chartier4aeec172014-03-27 16:09:46 -07001007 virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
Mathieu Chartier3f966702013-09-04 16:50:05 -07001008 ATRACE_BEGIN("Marking thread roots");
Mathieu Chartier858f1c52012-10-17 17:45:55 -07001009 // Note: self is not necessarily equal to thread since thread may be suspended.
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001010 Thread* const self = Thread::Current();
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001011 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
1012 << thread->GetState() << " thread " << thread << " self " << self;
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001013 thread->VisitRoots(this);
Mathieu Chartier3f966702013-09-04 16:50:05 -07001014 ATRACE_END();
Mathieu Chartier0f7bf6a2014-03-28 10:05:39 -07001015 if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) {
1016 ATRACE_BEGIN("RevokeRosAllocThreadLocalBuffers");
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001017 mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
Mathieu Chartier0f7bf6a2014-03-28 10:05:39 -07001018 ATRACE_END();
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001019 }
Lei Lidd9943d2015-02-02 14:24:44 +08001020 // If thread is a running mutator, then act on behalf of the garbage collector.
1021 // See the code in ThreadList::RunCheckpoint.
1022 if (thread->GetState() == kRunnable) {
1023 mark_sweep_->GetBarrier().Pass(self);
1024 }
Mathieu Chartier858f1c52012-10-17 17:45:55 -07001025 }
1026
1027 private:
Mathieu Chartier4aeec172014-03-27 16:09:46 -07001028 MarkSweep* const mark_sweep_;
Mathieu Chartier0f7bf6a2014-03-28 10:05:39 -07001029 const bool revoke_ros_alloc_thread_local_buffers_at_checkpoint_;
Mathieu Chartier858f1c52012-10-17 17:45:55 -07001030};
1031
Mathieu Chartier0f7bf6a2014-03-28 10:05:39 -07001032void MarkSweep::MarkRootsCheckpoint(Thread* self,
1033 bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07001034 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Mathieu Chartier0f7bf6a2014-03-28 10:05:39 -07001035 CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint);
Mathieu Chartier858f1c52012-10-17 17:45:55 -07001036 ThreadList* thread_list = Runtime::Current()->GetThreadList();
Ian Rogers1d54e732013-05-02 21:10:01 -07001037 // Request that the checkpoint be run on all threads, returning a count of the threads that
1038 // must run through the barrier, including self.
1039 size_t barrier_count = thread_list->RunCheckpoint(&check_point);
1040 // Release locks then wait for all mutator threads to pass the barrier.
Lei Lidd9943d2015-02-02 14:24:44 +08001041 // If there are no threads to wait for, all of the checkpoint functions have already finished,
1042 // so there is no need to release the locks.
1043 if (barrier_count == 0) {
1044 return;
1045 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001046 Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
1047 Locks::mutator_lock_->SharedUnlock(self);
Mathieu Chartier4aeec172014-03-27 16:09:46 -07001048 {
1049 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1050 gc_barrier_->Increment(self, barrier_count);
1051 }
Ian Rogers1d54e732013-05-02 21:10:01 -07001052 Locks::mutator_lock_->SharedLock(self);
1053 Locks::heap_bitmap_lock_->ExclusiveLock(self);
Mathieu Chartier858f1c52012-10-17 17:45:55 -07001054}
1055
Ian Rogers1d54e732013-05-02 21:10:01 -07001056void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07001057 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001058 Thread* self = Thread::Current();
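// Unmarked objects are batched into this buffer and freed kSweepArrayChunkFreeSize at a
// time, amortizing the per-call overhead of the allocator's FreeList().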
Hiroshi Yamauchibbdc5bc2014-05-28 14:04:59 -07001059 mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>(
1060 sweep_array_free_buffer_mem_map_->BaseBegin());
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001061 size_t chunk_free_pos = 0;
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07001062 ObjectBytePair freed;
1063 ObjectBytePair freed_los;
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001064 // How many objects are left in the array, modified after each space is swept.
Mathieu Chartiercb535da2015-01-23 13:50:03 -08001065 StackReference<Object>* objects = allocations->Begin();
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001066 size_t count = allocations->Size();
1067 // Change the order to ensure that the non-moving space is swept last as an optimization.
1068 std::vector<space::ContinuousSpace*> sweep_spaces;
1069 space::ContinuousSpace* non_moving_space = nullptr;
1070 for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
Mathieu Chartier8d562102014-03-12 17:42:10 -07001071 if (space->IsAllocSpace() && !immune_region_.ContainsSpace(space) &&
1072 space->GetLiveBitmap() != nullptr) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001073 if (space == heap_->GetNonMovingSpace()) {
1074 non_moving_space = space;
1075 } else {
1076 sweep_spaces.push_back(space);
1077 }
1078 }
1079 }
1080 // We are unlikely to sweep a significant number of non-moving objects, so we sweep the
1081 // non-moving space after the other alloc spaces as an optimization.
1082 if (non_moving_space != nullptr) {
1083 sweep_spaces.push_back(non_moving_space);
1084 }
1085 // Start by sweeping the continuous spaces.
1086 for (space::ContinuousSpace* space : sweep_spaces) {
1087 space::AllocSpace* alloc_space = space->AsAllocSpace();
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -07001088 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1089 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001090 if (swap_bitmaps) {
1091 std::swap(live_bitmap, mark_bitmap);
1092 }
Mathieu Chartiercb535da2015-01-23 13:50:03 -08001093 StackReference<Object>* out = objects;
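// Filter the allocation stack in place: objects belonging to other spaces are compacted
// toward the front through 'out', while unmarked objects in this space are queued for freeing.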
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001094 for (size_t i = 0; i < count; ++i) {
Mathieu Chartiercb535da2015-01-23 13:50:03 -08001095 Object* const obj = objects[i].AsMirrorPtr();
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08001096 if (kUseThreadLocalAllocationStack && obj == nullptr) {
1097 continue;
1098 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001099 if (space->HasAddress(obj)) {
1100 // This object is in the space; remove it from the array and add it to the sweep buffer
1101 // if needed.
1102 if (!mark_bitmap->Test(obj)) {
1103 if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001104 TimingLogger::ScopedTiming t2("FreeList", GetTimings());
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07001105 freed.objects += chunk_free_pos;
1106 freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001107 chunk_free_pos = 0;
1108 }
1109 chunk_free_buffer[chunk_free_pos++] = obj;
1110 }
1111 } else {
Mathieu Chartiercb535da2015-01-23 13:50:03 -08001112 (out++)->Assign(obj);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001113 }
1114 }
1115 if (chunk_free_pos > 0) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001116 TimingLogger::ScopedTiming t2("FreeList", GetTimings());
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07001117 freed.objects += chunk_free_pos;
1118 freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001119 chunk_free_pos = 0;
1120 }
1121 // All of the references which this space contained have been removed from the allocation
1122 // stack; update the count to the number of objects that remain.
1123 count = out - objects;
1124 }
1125 // Handle the large object space.
1126 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
Mathieu Chartier2dbe6272014-09-16 10:43:23 -07001127 if (large_object_space != nullptr) {
1128 accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
1129 accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
1130 if (swap_bitmaps) {
1131 std::swap(large_live_objects, large_mark_objects);
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08001132 }
Mathieu Chartier2dbe6272014-09-16 10:43:23 -07001133 for (size_t i = 0; i < count; ++i) {
Mathieu Chartiercb535da2015-01-23 13:50:03 -08001134 Object* const obj = objects[i].AsMirrorPtr();
Mathieu Chartier2dbe6272014-09-16 10:43:23 -07001135 // Handle large objects.
1136 if (kUseThreadLocalAllocationStack && obj == nullptr) {
1137 continue;
1138 }
1139 if (!large_mark_objects->Test(obj)) {
1140 ++freed_los.objects;
1141 freed_los.bytes += large_object_space->Free(self, obj);
1142 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001143 }
1144 }
Mathieu Chartierf5997b42014-06-20 10:37:54 -07001145 {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001146 TimingLogger::ScopedTiming t2("RecordFree", GetTimings());
Mathieu Chartierf5997b42014-06-20 10:37:54 -07001147 RecordFree(freed);
1148 RecordFreeLOS(freed_los);
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001149 t2.NewTiming("ResetStack");
Mathieu Chartierf5997b42014-06-20 10:37:54 -07001150 allocations->Reset();
1151 }
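// Release the free buffer's pages back to the kernel, since the buffer is only needed while
// SweepArray is running.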
Ian Rogersc5f17732014-06-05 20:48:42 -07001152 sweep_array_free_buffer_mem_map_->MadviseDontNeedAndZero();
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001153}
1154
Ian Rogers1d54e732013-05-02 21:10:01 -07001155void MarkSweep::Sweep(bool swap_bitmaps) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07001156 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Mathieu Chartier0f7bf6a2014-03-28 10:05:39 -07001157 // Ensure that nobody inserted items in the live stack after we swapped the stacks.
1158 CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
Mathieu Chartierf5997b42014-06-20 10:37:54 -07001159 {
1160 TimingLogger::ScopedTiming t2("MarkAllocStackAsLive", GetTimings());
1161 // Mark everything allocated since the last GC as live so that we can sweep concurrently,
1162 // knowing that new allocations won't be marked as live.
1163 accounting::ObjectStack* live_stack = heap_->GetLiveStack();
1164 heap_->MarkAllocStackAsLive(live_stack);
1165 live_stack->Reset();
1166 DCHECK(mark_stack_->IsEmpty());
1167 }
Mathieu Chartier02e25112013-08-14 16:14:24 -07001168 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
Mathieu Chartiera1602f22014-01-13 17:19:19 -08001169 if (space->IsContinuousMemMapAllocSpace()) {
1170 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
Mathieu Chartierf5997b42014-06-20 10:37:54 -07001171 TimingLogger::ScopedTiming split(
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07001172 alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", GetTimings());
1173 RecordFree(alloc_space->Sweep(swap_bitmaps));
Carl Shapiro58551df2011-07-24 03:09:51 -07001174 }
1175 }
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001176 SweepLargeObjects(swap_bitmaps);
Carl Shapiro58551df2011-07-24 03:09:51 -07001177}
1178
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001179void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
Mathieu Chartier2dbe6272014-09-16 10:43:23 -07001180 space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
1181 if (los != nullptr) {
1182 TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
1183 RecordFreeLOS(los->Sweep(swap_bitmaps));
1184 }
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001185}
1186
Mathieu Chartier407f7022014-02-18 14:37:05 -08001187// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
1188// marked, put it on the appropriate list in the heap for later processing.
1189void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) {
Mathieu Chartier0e54cd02014-03-20 12:41:23 -07001190 if (kCountJavaLangRefs) {
1191 ++reference_count_;
1192 }
Mathieu Chartier308351a2014-06-15 12:39:02 -07001193 heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, &HeapReferenceMarkedCallback,
1194 this);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001195}
1196
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001197class MarkObjectVisitor {
1198 public:
Mathieu Chartier407f7022014-02-18 14:37:05 -08001199 explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {
1200 }
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001201
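// Invoked for each reference field of a scanned object; marks the object the field points to.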
Mathieu Chartier407f7022014-02-18 14:37:05 -08001202 void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
1203 ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1204 EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001205 if (kCheckLocks) {
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001206 Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
1207 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
1208 }
Ian Rogersb0fa5dc2014-04-28 16:47:08 -07001209 mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset));
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001210 }
1211
1212 private:
1213 MarkSweep* const mark_sweep_;
1214};
1215
Carl Shapiro69759ea2011-07-21 18:13:35 -07001216 // Scans an object's references. Determines the type of each reference
1217 // and dispatches to a specialized scanning routine.
Mathieu Chartier590fee92013-09-13 13:46:47 -07001218void MarkSweep::ScanObject(Object* obj) {
Mathieu Chartier407f7022014-02-18 14:37:05 -08001219 MarkObjectVisitor mark_visitor(this);
1220 DelayReferenceReferentVisitor ref_visitor(this);
1221 ScanObjectVisit(obj, mark_visitor, ref_visitor);
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001222}
1223
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -07001224void MarkSweep::ProcessMarkStackCallback(void* arg) {
1225 reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(false);
Mathieu Chartier3bb57c72014-02-18 11:38:45 -08001226}
1227
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001228void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001229 Thread* self = Thread::Current();
1230 ThreadPool* thread_pool = GetHeap()->GetThreadPool();
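// Compute a chunk size that divides the mark stack roughly evenly among the threads; the
// '+ 1' rounds up and guarantees a non-zero size, while kMaxSize caps each task's share.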
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001231 const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
1232 static_cast<size_t>(MarkStackTask<false>::kMaxSize));
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001233 CHECK_GT(chunk_size, 0U);
1234 // Split the current mark stack up into work tasks.
Mathieu Chartiercb535da2015-01-23 13:50:03 -08001235 for (auto* it = mark_stack_->Begin(), *end = mark_stack_->End(); it < end; ) {
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001236 const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
Mathieu Chartier0e54cd02014-03-20 12:41:23 -07001237 thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it));
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001238 it += delta;
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001239 }
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001240 thread_pool->SetMaxActiveWorkers(thread_count - 1);
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001241 thread_pool->StartWorkers(self);
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001242 thread_pool->Wait(self, true, true);
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001243 thread_pool->StopWorkers(self);
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001244 mark_stack_->Reset();
Ian Rogers3e5cf302014-05-20 16:40:37 -07001245 CHECK_EQ(work_chunks_created_.LoadSequentiallyConsistent(),
1246 work_chunks_deleted_.LoadSequentiallyConsistent())
1247 << " some of the work chunks were leaked";
Carl Shapiro69759ea2011-07-21 18:13:35 -07001248}
1249
Ian Rogers5d76c432011-10-31 21:42:49 -07001250// Scan anything that's on the mark stack.
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001251void MarkSweep::ProcessMarkStack(bool paused) {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07001252 TimingLogger::ScopedTiming t(paused ? "(Paused)ProcessMarkStack" : __FUNCTION__, GetTimings());
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001253 size_t thread_count = GetThreadCount(paused);
1254 if (kParallelProcessMarkStack && thread_count > 1 &&
1255 mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
1256 ProcessMarkStackParallel(thread_count);
Mathieu Chartierd8195f12012-10-05 12:21:28 -07001257 } else {
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001258 // TODO: Tune this.
1259 static const size_t kFifoSize = 4;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001260 BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
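// Pop objects into a small FIFO and prefetch them a few entries ahead of use, so their
// memory is likely to be cache-resident by the time ScanObject() touches it.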
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001261 for (;;) {
Mathieu Chartier590fee92013-09-13 13:46:47 -07001262 Object* obj = nullptr;
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001263 if (kUseMarkStackPrefetch) {
1264 while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001265 Object* mark_stack_obj = mark_stack_->PopBack();
1266 DCHECK(mark_stack_obj != nullptr);
1267 __builtin_prefetch(mark_stack_obj);
1268 prefetch_fifo.push_back(mark_stack_obj);
Mathieu Chartier94c32c52013-08-09 11:14:04 -07001269 }
1270 if (prefetch_fifo.empty()) {
1271 break;
1272 }
1273 obj = prefetch_fifo.front();
1274 prefetch_fifo.pop_front();
1275 } else {
1276 if (mark_stack_->IsEmpty()) {
1277 break;
1278 }
1279 obj = mark_stack_->PopBack();
1280 }
Mathieu Chartier4aeec172014-03-27 16:09:46 -07001281 DCHECK(obj != nullptr);
Mathieu Chartierd8195f12012-10-05 12:21:28 -07001282 ScanObject(obj);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001283 }
1284 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07001285}
1286
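// Immune objects are treated as always marked; objects covered by the current space bitmap
// are tested against it, and everything else falls back to the heap-wide mark bitmap.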
Mathieu Chartier52e4b432014-06-10 11:22:31 -07001287inline bool MarkSweep::IsMarked(const Object* object) const {
Mathieu Chartier8d562102014-03-12 17:42:10 -07001288 if (immune_region_.ContainsObject(object)) {
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001289 return true;
1290 }
Mathieu Chartier0e54cd02014-03-20 12:41:23 -07001291 if (current_space_bitmap_->HasAddress(object)) {
1292 return current_space_bitmap_->Test(object);
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001293 }
Mathieu Chartier0e54cd02014-03-20 12:41:23 -07001294 return mark_bitmap_->Test(object);
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001295}
1296
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001297void MarkSweep::FinishPhase() {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07001298 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001299 if (kCountScannedTypes) {
Ian Rogers3e5cf302014-05-20 16:40:37 -07001300 VLOG(gc) << "MarkSweep scanned classes=" << class_count_.LoadRelaxed()
1301 << " arrays=" << array_count_.LoadRelaxed() << " other=" << other_count_.LoadRelaxed();
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001302 }
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001303 if (kCountTasks) {
Ian Rogers3e5cf302014-05-20 16:40:37 -07001304 VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_.LoadRelaxed();
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001305 }
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001306 if (kMeasureOverhead) {
Ian Rogers3e5cf302014-05-20 16:40:37 -07001307 VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_.LoadRelaxed());
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001308 }
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001309 if (kProfileLargeObjects) {
Ian Rogers3e5cf302014-05-20 16:40:37 -07001310 VLOG(gc) << "Large objects tested " << large_object_test_.LoadRelaxed()
1311 << " marked " << large_object_mark_.LoadRelaxed();
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001312 }
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001313 if (kCountJavaLangRefs) {
Ian Rogers3e5cf302014-05-20 16:40:37 -07001314 VLOG(gc) << "References scanned " << reference_count_.LoadRelaxed();
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001315 }
Mathieu Chartier0e54cd02014-03-20 12:41:23 -07001316 if (kCountMarkedObjects) {
Ian Rogers3e5cf302014-05-20 16:40:37 -07001317 VLOG(gc) << "Marked: null=" << mark_null_count_.LoadRelaxed()
1318 << " immune=" << mark_immune_count_.LoadRelaxed()
1319 << " fastpath=" << mark_fastpath_count_.LoadRelaxed()
1320 << " slowpath=" << mark_slowpath_count_.LoadRelaxed();
Mathieu Chartier0e54cd02014-03-20 12:41:23 -07001321 }
Mathieu Chartier4aeec172014-03-27 16:09:46 -07001322 CHECK(mark_stack_->IsEmpty()); // Ensure that the mark stack is empty.
Mathieu Chartier5301cd22012-05-31 12:11:36 -07001323 mark_stack_->Reset();
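// Clear all mark bitmaps so the next GC cycle starts from a clean slate; the writer lock
// below guards the heap bitmaps while they are cleared.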
Mathieu Chartier4aeec172014-03-27 16:09:46 -07001324 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1325 heap_->ClearMarkedObjects();
Carl Shapiro69759ea2011-07-21 18:13:35 -07001326}
1327
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001328void MarkSweep::RevokeAllThreadLocalBuffers() {
1329 if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) {
1330 // If concurrent, rosalloc thread-local buffers are revoked at the
1331 // thread checkpoint. Bump pointer space thread-local buffers must
1332 // not be in use.
1333 GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
1334 } else {
Mathieu Chartierf5997b42014-06-20 10:37:54 -07001335 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001336 GetHeap()->RevokeAllThreadLocalBuffers();
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001337 }
1338}
1339
Ian Rogers1d54e732013-05-02 21:10:01 -07001340} // namespace collector
1341} // namespace gc
Carl Shapiro69759ea2011-07-21 18:13:35 -07001342} // namespace art