/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "concurrent_copying.h"

#include "art_field-inl.h"
#include "base/stl_util.h"
#include "debugger.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
#include "image-inl.h"
#include "intern_table.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"

namespace art {
namespace gc {
namespace collector {

static constexpr size_t kDefaultGcMarkStackSize = 2 * MB;

ConcurrentCopying::ConcurrentCopying(Heap* heap, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       "concurrent copying + mark sweep"),
      region_space_(nullptr), gc_barrier_(new Barrier(0)),
      gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack",
                                                     kDefaultGcMarkStackSize,
                                                     kDefaultGcMarkStackSize)),
      mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
      thread_running_gc_(nullptr),
      is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
      heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0), mark_stack_mode_(kMarkStackModeOff),
      weak_ref_access_enabled_(true),
      skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
      rb_table_(heap_->GetReadBarrierTable()),
      force_evacuate_all_(false) {
  static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
                "The region space size and the read barrier table region size must match");
  cc_heap_bitmap_.reset(new accounting::HeapBitmap(heap));
  Thread* self = Thread::Current();
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Cache this so that we won't have to lock heap_bitmap_lock_ in
    // Mark() which could cause a nested lock on heap_bitmap_lock_
    // when GC causes a RB while doing GC or a lock order violation
    // (class_linker_lock_ and heap_bitmap_lock_).
    heap_mark_bitmap_ = heap->GetMarkBitmap();
  }
  {
    MutexLock mu(self, mark_stack_lock_);
    for (size_t i = 0; i < kMarkStackPoolSize; ++i) {
      accounting::AtomicStack<mirror::Object>* mark_stack =
          accounting::AtomicStack<mirror::Object>::Create(
              "thread local mark stack", kMarkStackSize, kMarkStackSize);
      pooled_mark_stacks_.push_back(mark_stack);
    }
  }
}

void ConcurrentCopying::MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) {
  // Used for preserving soft references, should be OK to not have a CAS here since there should be
  // no other threads which can trigger read barriers on the same referent during reference
  // processing.
  from_ref->Assign(Mark(from_ref->AsMirrorPtr()));
  DCHECK(!from_ref->IsNull());
}

ConcurrentCopying::~ConcurrentCopying() {
  STLDeleteElements(&pooled_mark_stacks_);
}

void ConcurrentCopying::RunPhases() {
  CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier);
  CHECK(!is_active_);
  is_active_ = true;
  Thread* self = Thread::Current();
  thread_running_gc_ = self;
  Locks::mutator_lock_->AssertNotHeld(self);
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    InitializePhase();
  }
  FlipThreadRoots();
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    MarkingPhase();
  }
  // Verify no from-space refs. This causes a pause.
  if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) {
    TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
    ScopedPause pause(this);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "Verifying no from-space refs";
    }
    VerifyNoFromSpaceReferences();
    if (kVerboseMode) {
      LOG(INFO) << "Done verifying no from-space refs";
    }
    CheckEmptyMarkStack();
  }
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  FinishPhase();
  CHECK(is_active_);
  is_active_ = false;
  thread_running_gc_ = nullptr;
}

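// Set up the collector-local mark bitmaps: each immune (image or zygote) space and the region
// space gets its own ContinuousSpaceBitmap, tracked in cc_heap_bitmap_ and cc_bitmaps_, with the
// region space bitmap additionally cached in region_space_bitmap_.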
void ConcurrentCopying::BindBitmaps() {
  Thread* self = Thread::Current();
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      CHECK(space->IsZygoteSpace() || space->IsImageSpace());
      immune_spaces_.AddSpace(space);
      const char* bitmap_name = space->IsImageSpace() ? "cc image space bitmap" :
          "cc zygote space bitmap";
      // TODO: try avoiding using bitmaps for image/zygote to save space.
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create(bitmap_name, space->Begin(), space->Capacity());
      cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
      cc_bitmaps_.push_back(bitmap);
    } else if (space == region_space_) {
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create("cc region space bitmap",
                                                    space->Begin(), space->Capacity());
      cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
      cc_bitmaps_.push_back(bitmap);
      region_space_bitmap_ = bitmap;
    }
  }
}

void ConcurrentCopying::InitializePhase() {
  TimingLogger::ScopedTiming split("InitializePhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC InitializePhase";
    LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-"
              << reinterpret_cast<void*>(region_space_->Limit());
  }
  CheckEmptyMarkStack();
  if (kIsDebugBuild) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    CHECK(false_gray_stack_.empty());
  }
  immune_spaces_.Reset();
  bytes_moved_.StoreRelaxed(0);
  objects_moved_.StoreRelaxed(0);
  if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
      GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
      GetCurrentIteration()->GetClearSoftReferences()) {
    force_evacuate_all_ = true;
  } else {
    force_evacuate_all_ = false;
  }
  BindBitmaps();
  if (kVerboseMode) {
    LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_;
    LOG(INFO) << "Largest immune region: " << immune_spaces_.GetLargestImmuneRegion().Begin()
              << "-" << immune_spaces_.GetLargestImmuneRegion().End();
    for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
      LOG(INFO) << "Immune space: " << *space;
    }
    LOG(INFO) << "GC end of InitializePhase";
  }
}

// Used to switch the thread roots of a thread from from-space refs to to-space refs.
class ConcurrentCopying::ThreadFlipVisitor : public Closure {
 public:
  ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
      : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
  }

  virtual void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->SetIsGcMarking(true);
    if (use_tlab_ && thread->HasTlab()) {
      if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
        // This must come before the revoke.
        size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
        reinterpret_cast<Atomic<size_t>*>(&concurrent_copying_->from_space_num_objects_at_first_pause_)->
            FetchAndAddSequentiallyConsistent(thread_local_objects);
      } else {
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
      }
    }
    if (kUseThreadLocalAllocationStack) {
      thread->RevokeThreadLocalAllocationStack();
    }
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    thread->VisitRoots(concurrent_copying_);
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool use_tlab_;
};

// Called back from Runtime::FlipThreadRoots() during a pause.
class ConcurrentCopying::FlipCallback : public Closure {
 public:
  explicit FlipCallback(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE REQUIRES(Locks::mutator_lock_) {
    ConcurrentCopying* cc = concurrent_copying_;
    TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self);
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
    cc->SwapStacks();
    if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
      cc->RecordLiveStackFreezeSize(self);
      cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated();
      cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
    }
    cc->is_marking_ = true;
    cc->mark_stack_mode_.StoreRelaxed(ConcurrentCopying::kMarkStackModeThreadLocal);
    if (kIsDebugBuild) {
      cc->region_space_->AssertAllRegionLiveBytesZeroOrCleared();
    }
    if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
      CHECK(Runtime::Current()->IsAotCompiler());
      TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
      Runtime::Current()->VisitTransactionRoots(cc);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Switch thread roots from from-space refs to to-space refs. Forward/mark the thread roots.
void ConcurrentCopying::FlipThreadRoots() {
  TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
  }
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  gc_barrier_->Init(self, 0);
  ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
  FlipCallback flip_callback(this);
  heap_->ThreadFlipBegin(self);  // Sync with JNI critical calls.
  size_t barrier_count = Runtime::Current()->FlipThreadRoots(
      &thread_flip_visitor, &flip_callback, this);
  heap_->ThreadFlipEnd(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  is_asserting_to_space_invariant_ = true;
  QuasiAtomic::ThreadFenceForConstructor();
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
    LOG(INFO) << "GC end of FlipThreadRoots";
  }
}

void ConcurrentCopying::SwapStacks() {
  heap_->SwapStacks();
}

void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
}

// Used to visit objects in the immune spaces.
class ConcurrentCopying::ImmuneSpaceObjVisitor {
 public:
  explicit ImmuneSpaceObjVisitor(ConcurrentCopying* cc) : collector_(cc) {}

  void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_)
      SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
    DCHECK(obj != nullptr);
    DCHECK(collector_->immune_spaces_.ContainsObject(obj));
    accounting::ContinuousSpaceBitmap* cc_bitmap =
        collector_->cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
    DCHECK(cc_bitmap != nullptr)
        << "An immune space object must have a bitmap";
    if (kIsDebugBuild) {
      DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj))
          << "Immune space object must be already marked";
    }
    collector_->MarkUnevacFromSpaceRegionOrImmuneSpace(obj, cc_bitmap);
  }

 private:
  ConcurrentCopying* const collector_;
};

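// A checkpoint closure that does no work other than passing the GC barrier. Running it on every
// thread (see IssueEmptyCheckpoint() below) presumably suffices to ensure that each thread has
// reached a safepoint at least once before the collector proceeds.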
class EmptyCheckpoint : public Closure {
 public:
  explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Concurrently mark roots that are guarded by read barriers and process the mark stack.
void ConcurrentCopying::MarkingPhase() {
  TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC MarkingPhase";
  }
  CHECK(weak_ref_access_enabled_);
  {
    // Mark the image root. The WB-based collectors do not need to
    // scan the image objects from roots by relying on the card table,
    // but it's necessary for the RB to-space invariant to hold.
    TimingLogger::ScopedTiming split1("VisitImageRoots", GetTimings());
    for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
      if (space->IsImageSpace()) {
        gc::space::ImageSpace* image = space->AsImageSpace();
        if (image != nullptr) {
          mirror::ObjectArray<mirror::Object>* image_root = image->GetImageHeader().GetImageRoots();
          mirror::Object* marked_image_root = Mark(image_root);
          CHECK_EQ(image_root, marked_image_root) << "An image object does not move";
          if (ReadBarrier::kEnableToSpaceInvariantChecks) {
            AssertToSpaceInvariant(nullptr, MemberOffset(0), marked_image_root);
          }
        }
      }
    }
  }
  {
    TimingLogger::ScopedTiming split2("VisitConcurrentRoots", GetTimings());
    Runtime::Current()->VisitConcurrentRoots(this, kVisitRootFlagAllRoots);
  }
  {
    // TODO: don't visit the transaction roots if it's not active.
    TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
    Runtime::Current()->VisitNonThreadRoots(this);
  }

  // Immune spaces.
  for (auto& space : immune_spaces_.GetSpaces()) {
    DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
    accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
    ImmuneSpaceObjVisitor visitor(this);
    live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                  reinterpret_cast<uintptr_t>(space->Limit()),
                                  visitor);
  }

  Thread* self = Thread::Current();
  {
    TimingLogger::ScopedTiming split7("ProcessMarkStack", GetTimings());
    // We transition through three mark stack modes (thread-local, shared, GC-exclusive). The
    // primary reasons are that we need to use a checkpoint to process thread-local mark stacks,
    // but that after we disable weak ref accesses, we can't use a checkpoint due to a deadlock
    // issue because running threads potentially block at WaitHoldingLocks, and that once we
    // reach the point where we process weak references, we can avoid using a lock when accessing
    // the GC mark stack, which makes mark stack processing more efficient.

    // Process the mark stack once in the thread-local stack mode. This marks most of the live
    // objects, aside from weak ref accesses with read barriers (Reference::GetReferent() and
    // system weaks) that may happen concurrently while we process the mark stack and may newly
    // mark/gray objects and push refs onto the mark stack.
    ProcessMarkStack();
    // Switch to the shared mark stack mode. That is, revoke and process thread-local mark stacks
    // for the last time before transitioning to the shared mark stack mode, which would process new
    // refs that may have been concurrently pushed onto the mark stack during the ProcessMarkStack()
    // call above. At the same time, disable weak ref accesses using a per-thread flag. It's
    // important to do these together in a single checkpoint so that we can ensure that mutators
    // won't newly gray objects and push new refs onto the mark stack due to weak ref accesses and
    // mutators safely transition to the shared mark stack mode (without leaving unprocessed refs on
    // the thread-local mark stacks), without a race. This is why we use a thread-local weak ref
    // access flag Thread::tls32_.weak_ref_access_enabled_ instead of the global ones.
    SwitchToSharedMarkStackMode();
    CHECK(!self->GetWeakRefAccessEnabled());
    // Now that weak refs accesses are disabled, once we exhaust the shared mark stack again here
    // (which may be non-empty if there were refs found on thread-local mark stacks during the above
    // SwitchToSharedMarkStackMode() call), we won't have new refs to process, that is, mutators
    // (via read barriers) have no way to produce any more refs to process. Marking converges once
    // before we process weak refs below.
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Switch to the GC exclusive mark stack mode so that we can process the mark stack without a
    // lock from this point on.
    SwitchToGcExclusiveMarkStackMode();
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "ProcessReferences";
    }
    // Process weak references. This may produce new refs to process and have them processed via
    // ProcessMarkStack (in the GC exclusive mark stack mode).
    ProcessReferences(self);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks";
    }
    SweepSystemWeaks(self);
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks done";
    }
    // Process the mark stack here one last time because the above SweepSystemWeaks() call may have
    // marked some objects (strings alive) as hash_set::Erase() can call the hash function for
    // arbitrary elements in the weak intern table in InternTable::Table::SweepWeaks().
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Re-enable weak ref accesses.
    ReenableWeakRefAccess(self);
    // Free data for class loaders that we unloaded.
    Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
    // Marking is done. Disable marking.
    DisableMarking();
    if (kUseBakerReadBarrier) {
      ProcessFalseGrayStack();
    }
    CheckEmptyMarkStack();
  }

  CHECK(weak_ref_access_enabled_);
  if (kVerboseMode) {
    LOG(INFO) << "GC end of MarkingPhase";
  }
}

void ConcurrentCopying::ReenableWeakRefAccess(Thread* self) {
  if (kVerboseMode) {
    LOG(INFO) << "ReenableWeakRefAccess";
  }
  weak_ref_access_enabled_.StoreRelaxed(true);  // This is for new threads.
  QuasiAtomic::ThreadFenceForConstructor();
  // Iterate all threads (don't need to or can't use a checkpoint) and re-enable weak ref access.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      thread->SetWeakRefAccessEnabled(true);
    }
  }
  // Unblock blocking threads.
  GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
  Runtime::Current()->BroadcastForNewSystemWeaks();
}

class ConcurrentCopying::DisableMarkingCheckpoint : public Closure {
 public:
  explicit DisableMarkingCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // Disable the thread-local is_gc_marking flag.
    // Note a thread that has just started right before this checkpoint may already have this flag
    // set to false, which is OK.
    thread->SetIsGcMarking(false);
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

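// Run DisableMarkingCheckpoint on every thread and wait for all of them to pass the barrier, so
// that no mutator keeps running with a stale thread-local is_gc_marking flag.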
void ConcurrentCopying::IssueDisableMarkingCheckpoint() {
  Thread* self = Thread::Current();
  DisableMarkingCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions are
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

void ConcurrentCopying::DisableMarking() {
  // Change the global is_marking flag to false. Do a fence before doing a checkpoint to update the
  // thread-local flags so that a new thread starting up will get the correct is_marking flag.
  is_marking_ = false;
  QuasiAtomic::ThreadFenceForConstructor();
  // Use a checkpoint to turn off the thread-local is_gc_marking flags and to ensure no threads are
  // still in the middle of a read barrier which may have a from-space ref cached in a local
  // variable.
  IssueDisableMarkingCheckpoint();
  if (kUseTableLookupReadBarrier) {
    heap_->rb_table_->ClearAll();
    DCHECK(heap_->rb_table_->IsAllCleared());
  }
  is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(1);
  mark_stack_mode_.StoreSequentiallyConsistent(kMarkStackModeOff);
}

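// "False gray" objects are refs whose read barrier pointer was set to gray even though they did
// not need to stay gray (see the comment in ProcessFalseGrayStack() below); they are recorded
// here and turned back to white at the end of marking.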
void ConcurrentCopying::PushOntoFalseGrayStack(mirror::Object* ref) {
  CHECK(kUseBakerReadBarrier);
  DCHECK(ref != nullptr);
  MutexLock mu(Thread::Current(), mark_stack_lock_);
  false_gray_stack_.push_back(ref);
}

void ConcurrentCopying::ProcessFalseGrayStack() {
  CHECK(kUseBakerReadBarrier);
  // Change the objects on the false gray stack from gray to white.
  MutexLock mu(Thread::Current(), mark_stack_lock_);
  for (mirror::Object* obj : false_gray_stack_) {
    DCHECK(IsMarked(obj));
    // The object could be white here if a thread got preempted after a success at the
    // AtomicSetReadBarrierPointer in Mark(), GC started marking through it (but not finished so
    // still gray), and the thread ran to register it onto the false gray stack.
    if (obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
      bool success = obj->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                      ReadBarrier::WhitePtr());
      DCHECK(success);
    }
  }
  false_gray_stack_.clear();
}

void ConcurrentCopying::IssueEmptyCheckpoint() {
  Thread* self = Thread::Current();
  EmptyCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions are
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

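// Grow the GC mark stack when it fills up. Resize() does not appear to preserve the contents, so
// the existing entries are copied out first and pushed back after the resize.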
void ConcurrentCopying::ExpandGcMarkStack() {
  DCHECK(gc_mark_stack_->IsFull());
  const size_t new_size = gc_mark_stack_->Capacity() * 2;
  std::vector<StackReference<mirror::Object>> temp(gc_mark_stack_->Begin(),
                                                   gc_mark_stack_->End());
  gc_mark_stack_->Resize(new_size);
  for (auto& ref : temp) {
    gc_mark_stack_->PushBack(ref.AsMirrorPtr());
  }
  DCHECK(!gc_mark_stack_->IsFull());
}

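// Push a to-space ref onto whichever mark stack the current mode dictates: the GC thread's own
// stack or a thread-local stack in kMarkStackModeThreadLocal, the shared stack (under
// mark_stack_lock_) in kMarkStackModeShared, or the GC stack without locking in
// kMarkStackModeGcExclusive. A rough sketch of the intended usage (hypothetical caller; the real
// callers are the marking routines in this file):
//
//   mirror::Object* to_ref = ...;  // newly marked/gray object whose fields still need scanning
//   PushOntoMarkStack(to_ref);     // queued; later popped and passed to ProcessMarkStackRef()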
void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
  CHECK_EQ(is_mark_stack_push_disallowed_.LoadRelaxed(), 0)
      << " " << to_ref << " " << PrettyTypeOf(to_ref);
  Thread* self = Thread::Current();  // TODO: pass self as an argument from call sites?
  CHECK(thread_running_gc_ != nullptr);
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (LIKELY(mark_stack_mode == kMarkStackModeThreadLocal)) {
    if (LIKELY(self == thread_running_gc_)) {
      // If GC-running thread, use the GC mark stack instead of a thread-local mark stack.
      CHECK(self->GetThreadLocalMarkStack() == nullptr);
      if (UNLIKELY(gc_mark_stack_->IsFull())) {
        ExpandGcMarkStack();
      }
      gc_mark_stack_->PushBack(to_ref);
    } else {
      // Otherwise, use a thread-local mark stack.
      accounting::AtomicStack<mirror::Object>* tl_mark_stack = self->GetThreadLocalMarkStack();
      if (UNLIKELY(tl_mark_stack == nullptr || tl_mark_stack->IsFull())) {
        MutexLock mu(self, mark_stack_lock_);
        // Get a new thread local mark stack.
        accounting::AtomicStack<mirror::Object>* new_tl_mark_stack;
        if (!pooled_mark_stacks_.empty()) {
          // Use a pooled mark stack.
          new_tl_mark_stack = pooled_mark_stacks_.back();
          pooled_mark_stacks_.pop_back();
        } else {
          // None pooled. Create a new one.
          new_tl_mark_stack =
              accounting::AtomicStack<mirror::Object>::Create(
                  "thread local mark stack", 4 * KB, 4 * KB);
        }
        DCHECK(new_tl_mark_stack != nullptr);
        DCHECK(new_tl_mark_stack->IsEmpty());
        new_tl_mark_stack->PushBack(to_ref);
        self->SetThreadLocalMarkStack(new_tl_mark_stack);
        if (tl_mark_stack != nullptr) {
          // Store the old full stack into a vector.
          revoked_mark_stacks_.push_back(tl_mark_stack);
        }
      } else {
        tl_mark_stack->PushBack(to_ref);
      }
    }
  } else if (mark_stack_mode == kMarkStackModeShared) {
    // Access the shared GC mark stack with a lock.
    MutexLock mu(self, mark_stack_lock_);
    if (UNLIKELY(gc_mark_stack_->IsFull())) {
      ExpandGcMarkStack();
    }
    gc_mark_stack_->PushBack(to_ref);
  } else {
    CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
             static_cast<uint32_t>(kMarkStackModeGcExclusive))
        << "ref=" << to_ref
        << " self->gc_marking=" << self->GetIsGcMarking()
        << " cc->is_marking=" << is_marking_;
    CHECK(self == thread_running_gc_)
        << "Only GC-running thread should access the mark stack "
        << "in the GC exclusive mark stack mode";
    // Access the GC mark stack without a lock.
    if (UNLIKELY(gc_mark_stack_->IsFull())) {
      ExpandGcMarkStack();
    }
    gc_mark_stack_->PushBack(to_ref);
  }
}

accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() {
  return heap_->allocation_stack_.get();
}

accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
  return heap_->live_stack_.get();
}

// The following visitors are used to verify that there are no references to the from-space left
// after marking.
class ConcurrentCopying::VerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
 public:
  explicit VerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    if (kUseBakerReadBarrier) {
      CHECK(ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr())
          << "Ref " << ref << " " << PrettyTypeOf(ref)
          << " has non-white rb_ptr " << ref->GetReadBarrierPointer();
    }
  }

  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    DCHECK(root != nullptr);
    operator()(root);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopying::VerifyNoFromSpaceRefsFieldVisitor {
 public:
  explicit VerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    VerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    this->operator()(ref, mirror::Reference::ReferentOffset(), false);
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    VerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(root->AsMirrorPtr());
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopying::VerifyNoFromSpaceRefsObjectVisitor {
 public:
  explicit VerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void* arg)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    VerifyNoFromSpaceRefsFieldVisitor visitor(collector);
    obj->VisitReferences(visitor, visitor);
    if (kUseBakerReadBarrier) {
      CHECK(obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr())
          << "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer();
    }
  }

 private:
  ConcurrentCopying* const collector_;
};

// Verify there are no from-space references left after the marking phase.
void ConcurrentCopying::VerifyNoFromSpaceReferences() {
  Thread* self = Thread::Current();
  DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
  // Verify that all threads have their is_gc_marking flag set to false.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      CHECK(!thread->GetIsGcMarking());
    }
  }
  VerifyNoFromSpaceRefsObjectVisitor visitor(this);
  // Roots.
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    VerifyNoFromSpaceRefsVisitor ref_visitor(this);
    Runtime::Current()->VisitRoots(&ref_visitor);
  }
  // The to-space.
  region_space_->WalkToSpace(VerifyNoFromSpaceRefsObjectVisitor::ObjectCallback, this);
  // Non-moving spaces.
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->GetMarkBitmap()->Visit(visitor);
  }
  // The alloc stack.
  {
    VerifyNoFromSpaceRefsVisitor ref_visitor(this);
    for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
         it < end; ++it) {
      mirror::Object* const obj = it->AsMirrorPtr();
      if (obj != nullptr && obj->GetClass() != nullptr) {
        // TODO: need to call this only if obj is alive?
        ref_visitor(obj);
        visitor(obj);
      }
    }
  }
  // TODO: LOS. But only refs in LOS are classes.
}

// The following visitors are used to assert the to-space invariant.
class ConcurrentCopying::AssertToSpaceInvariantRefsVisitor {
 public:
  explicit AssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopying::AssertToSpaceInvariantFieldVisitor {
 public:
  explicit AssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    AssertToSpaceInvariantRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* ref ATTRIBUTE_UNUSED) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    AssertToSpaceInvariantRefsVisitor visitor(collector_);
    visitor(root->AsMirrorPtr());
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopying::AssertToSpaceInvariantObjectVisitor {
 public:
  explicit AssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void* arg)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
    AssertToSpaceInvariantFieldVisitor visitor(collector);
    obj->VisitReferences(visitor, visitor);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
 public:
  RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
                                       bool disable_weak_ref_access)
      : concurrent_copying_(concurrent_copying),
        disable_weak_ref_access_(disable_weak_ref_access) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // Revoke thread local mark stacks.
    accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
    if (tl_mark_stack != nullptr) {
      MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
      concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
      thread->SetThreadLocalMarkStack(nullptr);
    }
    // Disable weak ref access.
    if (disable_weak_ref_access_) {
      thread->SetWeakRefAccessEnabled(false);
    }
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool disable_weak_ref_access_;
};

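// Run the checkpoint above on all threads to collect their thread-local mark stacks into
// revoked_mark_stacks_ (optionally disabling weak ref access at the same time), then wait for
// every mutator to pass the barrier.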
void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access) {
  Thread* self = Thread::Current();
  RevokeThreadLocalMarkStackCheckpoint check_point(this, disable_weak_ref_access);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions are
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

void ConcurrentCopying::RevokeThreadLocalMarkStack(Thread* thread) {
  Thread* self = Thread::Current();
  CHECK_EQ(self, thread);
  accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
  if (tl_mark_stack != nullptr) {
    CHECK(is_marking_);
    MutexLock mu(self, mark_stack_lock_);
    revoked_mark_stacks_.push_back(tl_mark_stack);
    thread->SetThreadLocalMarkStack(nullptr);
  }
}

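// Drain the mark stack(s). A single pass can race with mutators pushing new refs, so keep
// processing until two consecutive passes both find the stacks empty.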
void ConcurrentCopying::ProcessMarkStack() {
  if (kVerboseMode) {
    LOG(INFO) << "ProcessMarkStack. ";
  }
  bool empty_prev = false;
  while (true) {
    bool empty = ProcessMarkStackOnce();
    if (empty_prev && empty) {
      // Saw empty mark stack for a second time, done.
      break;
    }
    empty_prev = empty;
  }
}

bool ConcurrentCopying::ProcessMarkStackOnce() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK(self == thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  size_t count = 0;
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (mark_stack_mode == kMarkStackModeThreadLocal) {
    // Process the thread-local mark stacks and the GC mark stack.
    count += ProcessThreadLocalMarkStacks(false);
    while (!gc_mark_stack_->IsEmpty()) {
      mirror::Object* to_ref = gc_mark_stack_->PopBack();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    gc_mark_stack_->Reset();
  } else if (mark_stack_mode == kMarkStackModeShared) {
    // Process the shared GC mark stack with a lock.
    {
      MutexLock mu(self, mark_stack_lock_);
      CHECK(revoked_mark_stacks_.empty());
    }
    while (true) {
      std::vector<mirror::Object*> refs;
      {
        // Copy refs with lock. Note the number of refs should be small.
        MutexLock mu(self, mark_stack_lock_);
        if (gc_mark_stack_->IsEmpty()) {
          break;
        }
        for (StackReference<mirror::Object>* p = gc_mark_stack_->Begin();
             p != gc_mark_stack_->End(); ++p) {
          refs.push_back(p->AsMirrorPtr());
        }
        gc_mark_stack_->Reset();
      }
      for (mirror::Object* ref : refs) {
        ProcessMarkStackRef(ref);
        ++count;
      }
    }
  } else {
    CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
             static_cast<uint32_t>(kMarkStackModeGcExclusive));
    {
      MutexLock mu(self, mark_stack_lock_);
      CHECK(revoked_mark_stacks_.empty());
    }
    // Process the GC mark stack in the exclusive mode. No need to take the lock.
    while (!gc_mark_stack_->IsEmpty()) {
      mirror::Object* to_ref = gc_mark_stack_->PopBack();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    gc_mark_stack_->Reset();
  }

  // Return true if the stack was empty.
  return count == 0;
}

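// Revoke all thread-local mark stacks via a checkpoint and process every ref found on them,
// returning drained stacks to the pool (or deleting them once the pool is full). Returns the
// number of refs processed.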
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001034size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access) {
1035 // Run a checkpoint to collect all thread local mark stacks and iterate over them all.
1036 RevokeThreadLocalMarkStacks(disable_weak_ref_access);
1037 size_t count = 0;
1038 std::vector<accounting::AtomicStack<mirror::Object>*> mark_stacks;
1039 {
1040 MutexLock mu(Thread::Current(), mark_stack_lock_);
1041 // Make a copy of the mark stack vector.
1042 mark_stacks = revoked_mark_stacks_;
1043 revoked_mark_stacks_.clear();
1044 }
1045 for (accounting::AtomicStack<mirror::Object>* mark_stack : mark_stacks) {
1046 for (StackReference<mirror::Object>* p = mark_stack->Begin(); p != mark_stack->End(); ++p) {
1047 mirror::Object* to_ref = p->AsMirrorPtr();
1048 ProcessMarkStackRef(to_ref);
1049 ++count;
1050 }
1051 {
1052 MutexLock mu(Thread::Current(), mark_stack_lock_);
1053 if (pooled_mark_stacks_.size() >= kMarkStackPoolSize) {
1054 // The pool has enough. Delete it.
1055 delete mark_stack;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001056 } else {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001057 // Otherwise, put it into the pool for later reuse.
1058 mark_stack->Reset();
1059 pooled_mark_stacks_.push_back(mark_stack);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001060 }
1061 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001062 }
1063 return count;
1064}
1065
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001066inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001067 DCHECK(!region_space_->IsInFromSpace(to_ref));
1068 if (kUseBakerReadBarrier) {
1069 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
1070 << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
1071 << " is_marked=" << IsMarked(to_ref);
1072 }
1073 // Scan ref fields.
1074 Scan(to_ref);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001075 if (kUseBakerReadBarrier) {
1076 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
1077 << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
1078 << " is_marked=" << IsMarked(to_ref);
1079 }
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001080#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
1081 if (UNLIKELY((to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
1082 to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr &&
1083 !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())))) {
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001084 // Leave this reference gray in the queue so that GetReferent() will trigger a read barrier. We
1085 // will change it to white later in ReferenceQueue::DequeuePendingReference().
Richard Uhlere3627402016-02-02 13:36:55 -08001086 DCHECK(to_ref->AsReference()->GetPendingNext() != nullptr) << "Left unenqueued ref gray " << to_ref;
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001087 } else {
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001088    // We may occasionally leave a reference white in the queue if its referent happens to be
1089    // concurrently marked after the Scan() call above has enqueued the Reference. In that case the
1090    // IsInToSpace() check above evaluates to true, and we change the color from gray to white here
1091    // in this else block.
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001092 if (kUseBakerReadBarrier) {
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001093 bool success = to_ref->AtomicSetReadBarrierPointer</*kCasRelease*/true>(
1094 ReadBarrier::GrayPtr(),
1095 ReadBarrier::WhitePtr());
1096 DCHECK(success) << "Must succeed as we won the race.";
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001097 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001098 }
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001099#else
1100 DCHECK(!kUseBakerReadBarrier);
1101#endif
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001102
1103 if (region_space_->IsInUnevacFromSpace(to_ref)) {
1104    // Add the object's size to the live bytes of its unevacuated from-space region. Note that this
1105    // code is always run by the GC-running thread (no synchronization required).
1106 DCHECK(region_space_bitmap_->Test(to_ref));
1107    // Disable the read barrier in SizeOf for performance; this is safe here.
1108 size_t obj_size = to_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
1109 size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
1110 region_space_->AddLiveBytes(to_ref, alloc_size);
1111 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001112 if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001113 AssertToSpaceInvariantObjectVisitor visitor(this);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001114 visitor(to_ref);
1115 }
1116}
1117
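// Called on the GC-running thread to switch from the thread-local mark stack mode to the shared
// mark stack mode. This also disables weak ref access and drains the thread-local mark stacks
// one last time.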
1118void ConcurrentCopying::SwitchToSharedMarkStackMode() {
1119 Thread* self = Thread::Current();
1120 CHECK(thread_running_gc_ != nullptr);
1121 CHECK_EQ(self, thread_running_gc_);
1122 CHECK(self->GetThreadLocalMarkStack() == nullptr);
1123 MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
1124 CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
1125 static_cast<uint32_t>(kMarkStackModeThreadLocal));
1126 mark_stack_mode_.StoreRelaxed(kMarkStackModeShared);
1127 CHECK(weak_ref_access_enabled_.LoadRelaxed());
1128 weak_ref_access_enabled_.StoreRelaxed(false);
1129 QuasiAtomic::ThreadFenceForConstructor();
1130  // Process the thread-local mark stacks one last time after switching to the shared mark stack
1131  // mode and disabling weak ref accesses.
1132 ProcessThreadLocalMarkStacks(true);
1133 if (kVerboseMode) {
1134 LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access";
1135 }
1136}
1137
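// Called on the GC-running thread to switch from the shared mark stack mode to the GC-exclusive
// mode, in which only the GC-running thread is expected to access the GC mark stack.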
1138void ConcurrentCopying::SwitchToGcExclusiveMarkStackMode() {
1139 Thread* self = Thread::Current();
1140 CHECK(thread_running_gc_ != nullptr);
1141 CHECK_EQ(self, thread_running_gc_);
1142 CHECK(self->GetThreadLocalMarkStack() == nullptr);
1143 MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
1144 CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
1145 static_cast<uint32_t>(kMarkStackModeShared));
1146 mark_stack_mode_.StoreRelaxed(kMarkStackModeGcExclusive);
1147 QuasiAtomic::ThreadFenceForConstructor();
1148 if (kVerboseMode) {
1149 LOG(INFO) << "Switched to GC exclusive mark stack mode";
1150 }
1151}
1152
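// Check that all mark stacks are empty. In the thread-local mode this revokes the thread-local
// stacks first; any leftover entries are logged and the check aborts with a fatal error.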
1153void ConcurrentCopying::CheckEmptyMarkStack() {
1154 Thread* self = Thread::Current();
1155 CHECK(thread_running_gc_ != nullptr);
1156 CHECK_EQ(self, thread_running_gc_);
1157 CHECK(self->GetThreadLocalMarkStack() == nullptr);
1158 MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
1159 if (mark_stack_mode == kMarkStackModeThreadLocal) {
1160 // Thread-local mark stack mode.
1161 RevokeThreadLocalMarkStacks(false);
1162 MutexLock mu(Thread::Current(), mark_stack_lock_);
1163 if (!revoked_mark_stacks_.empty()) {
1164 for (accounting::AtomicStack<mirror::Object>* mark_stack : revoked_mark_stacks_) {
1165 while (!mark_stack->IsEmpty()) {
1166 mirror::Object* obj = mark_stack->PopBack();
1167 if (kUseBakerReadBarrier) {
1168 mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
1169 LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj) << " rb_ptr=" << rb_ptr
1170 << " is_marked=" << IsMarked(obj);
1171 } else {
1172 LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj)
1173 << " is_marked=" << IsMarked(obj);
1174 }
1175 }
1176 }
1177 LOG(FATAL) << "mark stack is not empty";
1178 }
1179 } else {
1180 // Shared, GC-exclusive, or off.
1181 MutexLock mu(Thread::Current(), mark_stack_lock_);
1182 CHECK(gc_mark_stack_->IsEmpty());
1183 CHECK(revoked_mark_stacks_.empty());
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001184 }
1185}
1186
1187void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
1188 TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
1189 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
Mathieu Chartier97509952015-07-13 14:35:43 -07001190 Runtime::Current()->SweepSystemWeaks(this);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001191}
1192
1193void ConcurrentCopying::Sweep(bool swap_bitmaps) {
1194 {
1195 TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
1196 accounting::ObjectStack* live_stack = heap_->GetLiveStack();
1197 if (kEnableFromSpaceAccountingCheck) {
1198 CHECK_GE(live_stack_freeze_size_, live_stack->Size());
1199 }
1200 heap_->MarkAllocStackAsLive(live_stack);
1201 live_stack->Reset();
1202 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001203 CheckEmptyMarkStack();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001204 TimingLogger::ScopedTiming split("Sweep", GetTimings());
1205 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
1206 if (space->IsContinuousMemMapAllocSpace()) {
1207 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
Mathieu Chartier763a31e2015-11-16 16:05:55 -08001208 if (space == region_space_ || immune_spaces_.ContainsSpace(space)) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001209 continue;
1210 }
1211 TimingLogger::ScopedTiming split2(
1212 alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
1213 RecordFree(alloc_space->Sweep(swap_bitmaps));
1214 }
1215 }
1216 SweepLargeObjects(swap_bitmaps);
1217}
1218
1219void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) {
1220 TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
1221 RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
1222}
1223
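// The reclaim phase: record the freed objects/bytes, clear the from-space regions, sweep the
// non-moving and large object spaces, and swap and unbind the bitmaps.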
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001224void ConcurrentCopying::ReclaimPhase() {
1225 TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings());
1226 if (kVerboseMode) {
1227 LOG(INFO) << "GC ReclaimPhase";
1228 }
1229 Thread* self = Thread::Current();
1230
1231 {
1232 // Double-check that the mark stack is empty.
1233 // Note: need to set this after VerifyNoFromSpaceRef().
1234 is_asserting_to_space_invariant_ = false;
1235 QuasiAtomic::ThreadFenceForConstructor();
1236 if (kVerboseMode) {
1237      LOG(INFO) << "Issue an empty checkpoint. ";
1238 }
1239 IssueEmptyCheckpoint();
1240 // Disable the check.
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001241 is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(0);
1242 CheckEmptyMarkStack();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001243 }
1244
1245 {
1246 // Record freed objects.
1247 TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
1248 // Don't include thread-locals that are in the to-space.
1249 uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
1250 uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
1251 uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
1252 uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
1253 uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
1254 uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
1255 if (kEnableFromSpaceAccountingCheck) {
1256 CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
1257 CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
1258 }
1259 CHECK_LE(to_objects, from_objects);
1260 CHECK_LE(to_bytes, from_bytes);
1261 int64_t freed_bytes = from_bytes - to_bytes;
1262 int64_t freed_objects = from_objects - to_objects;
1263 if (kVerboseMode) {
1264 LOG(INFO) << "RecordFree:"
1265 << " from_bytes=" << from_bytes << " from_objects=" << from_objects
1266 << " unevac_from_bytes=" << unevac_from_bytes << " unevac_from_objects=" << unevac_from_objects
1267 << " to_bytes=" << to_bytes << " to_objects=" << to_objects
1268 << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects
1269 << " from_space size=" << region_space_->FromSpaceSize()
1270 << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
1271 << " to_space size=" << region_space_->ToSpaceSize();
1272 LOG(INFO) << "(before) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
1273 }
1274 RecordFree(ObjectBytePair(freed_objects, freed_bytes));
1275 if (kVerboseMode) {
1276 LOG(INFO) << "(after) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
1277 }
1278 }
1279
1280 {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001281 TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
1282 region_space_->ClearFromSpace();
1283 }
1284
1285 {
1286 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001287 Sweep(false);
1288 SwapBitmaps();
1289 heap_->UnBindBitmaps();
1290
Nicolas Geoffrayddeb1722016-06-28 08:25:59 +00001291 // Remove bitmaps for the immune spaces.
1292 while (!cc_bitmaps_.empty()) {
1293 accounting::ContinuousSpaceBitmap* cc_bitmap = cc_bitmaps_.back();
1294 cc_heap_bitmap_->RemoveContinuousSpaceBitmap(cc_bitmap);
1295 delete cc_bitmap;
1296 cc_bitmaps_.pop_back();
1297 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001298 region_space_bitmap_ = nullptr;
1299 }
1300
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001301 CheckEmptyMarkStack();
1302
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001303 if (kVerboseMode) {
1304 LOG(INFO) << "GC end of ReclaimPhase";
1305 }
1306}
1307
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001308// Assert the to-space invariant.
1309void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
1310 mirror::Object* ref) {
1311 CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
1312 if (is_asserting_to_space_invariant_) {
1313 if (region_space_->IsInToSpace(ref)) {
1314 // OK.
1315 return;
1316 } else if (region_space_->IsInUnevacFromSpace(ref)) {
1317 CHECK(region_space_bitmap_->Test(ref)) << ref;
1318 } else if (region_space_->IsInFromSpace(ref)) {
1319 // Not OK. Do extra logging.
1320 if (obj != nullptr) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001321 LogFromSpaceRefHolder(obj, offset);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001322 }
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001323 ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001324 CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
1325 } else {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001326 AssertToSpaceInvariantInNonMovingSpace(obj, ref);
1327 }
1328 }
1329}
1330
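// Helper visitor that logs the address and value of each GC root when dumping information about
// a from-space ref found through a root.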
1331class RootPrinter {
1332 public:
1333 RootPrinter() { }
1334
1335 template <class MirrorType>
1336 ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
Mathieu Chartier90443472015-07-16 20:32:27 -07001337 SHARED_REQUIRES(Locks::mutator_lock_) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001338 if (!root->IsNull()) {
1339 VisitRoot(root);
1340 }
1341 }
1342
1343 template <class MirrorType>
1344 void VisitRoot(mirror::Object** root)
Mathieu Chartier90443472015-07-16 20:32:27 -07001345 SHARED_REQUIRES(Locks::mutator_lock_) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001346 LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << *root;
1347 }
1348
1349 template <class MirrorType>
1350 void VisitRoot(mirror::CompressedReference<MirrorType>* root)
Mathieu Chartier90443472015-07-16 20:32:27 -07001351 SHARED_REQUIRES(Locks::mutator_lock_) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001352 LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << root->AsMirrorPtr();
1353 }
1354};
1355
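// Assert the to-space invariant for a ref reached through a GC root. On a violation, log the
// holding field or method, the region space state, and the memory maps before aborting.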
1356void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source,
1357 mirror::Object* ref) {
1358 CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
1359 if (is_asserting_to_space_invariant_) {
1360 if (region_space_->IsInToSpace(ref)) {
1361 // OK.
1362 return;
1363 } else if (region_space_->IsInUnevacFromSpace(ref)) {
1364 CHECK(region_space_bitmap_->Test(ref)) << ref;
1365 } else if (region_space_->IsInFromSpace(ref)) {
1366 // Not OK. Do extra logging.
1367 if (gc_root_source == nullptr) {
1368 // No info.
1369 } else if (gc_root_source->HasArtField()) {
1370 ArtField* field = gc_root_source->GetArtField();
1371 LOG(INTERNAL_FATAL) << "gc root in field " << field << " " << PrettyField(field);
1372 RootPrinter root_printer;
1373 field->VisitRoots(root_printer);
1374 } else if (gc_root_source->HasArtMethod()) {
1375 ArtMethod* method = gc_root_source->GetArtMethod();
1376 LOG(INTERNAL_FATAL) << "gc root in method " << method << " " << PrettyMethod(method);
1377 RootPrinter root_printer;
Mathieu Chartier1147b9b2015-09-14 18:50:08 -07001378 method->VisitRoots(root_printer, sizeof(void*));
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001379 }
1380 ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
1381 region_space_->DumpNonFreeRegions(LOG(INTERNAL_FATAL));
1382 PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
1383 MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
1384 CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
1385 } else {
1386 AssertToSpaceInvariantInNonMovingSpace(nullptr, ref);
1387 }
1388 }
1389}
1390
1391void ConcurrentCopying::LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) {
1392 if (kUseBakerReadBarrier) {
1393 LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj)
1394 << " holder rb_ptr=" << obj->GetReadBarrierPointer();
1395 } else {
1396 LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj);
1397 }
1398 if (region_space_->IsInFromSpace(obj)) {
1399 LOG(INFO) << "holder is in the from-space.";
1400 } else if (region_space_->IsInToSpace(obj)) {
1401 LOG(INFO) << "holder is in the to-space.";
1402 } else if (region_space_->IsInUnevacFromSpace(obj)) {
1403 LOG(INFO) << "holder is in the unevac from-space.";
1404 if (region_space_bitmap_->Test(obj)) {
1405 LOG(INFO) << "holder is marked in the region space bitmap.";
1406 } else {
1407 LOG(INFO) << "holder is not marked in the region space bitmap.";
1408 }
1409 } else {
1410 // In a non-moving space.
Mathieu Chartier763a31e2015-11-16 16:05:55 -08001411 if (immune_spaces_.ContainsObject(obj)) {
1412 LOG(INFO) << "holder is in an immune image or the zygote space.";
Nicolas Geoffrayddeb1722016-06-28 08:25:59 +00001413 accounting::ContinuousSpaceBitmap* cc_bitmap =
1414 cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
1415 CHECK(cc_bitmap != nullptr)
1416 << "An immune space object must have a bitmap.";
1417 if (cc_bitmap->Test(obj)) {
1418 LOG(INFO) << "holder is marked in the bit map.";
1419 } else {
1420 LOG(INFO) << "holder is NOT marked in the bit map.";
1421 }
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001422 } else {
Mathieu Chartier763a31e2015-11-16 16:05:55 -08001423 LOG(INFO) << "holder is in a non-immune, non-moving (or main) space.";
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001424 accounting::ContinuousSpaceBitmap* mark_bitmap =
1425 heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
1426 accounting::LargeObjectBitmap* los_bitmap =
1427 heap_mark_bitmap_->GetLargeObjectBitmap(obj);
1428 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1429 bool is_los = mark_bitmap == nullptr;
1430 if (!is_los && mark_bitmap->Test(obj)) {
1431 LOG(INFO) << "holder is marked in the mark bit map.";
1432 } else if (is_los && los_bitmap->Test(obj)) {
1433 LOG(INFO) << "holder is marked in the los bit map.";
1434 } else {
1435 // If ref is on the allocation stack, then it is considered
1436          // marked/alive (but not necessarily on the live stack).
1437 if (IsOnAllocStack(obj)) {
1438 LOG(INFO) << "holder is on the alloc stack.";
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001439 } else {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001440 LOG(INFO) << "holder is not marked or on the alloc stack.";
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001441 }
1442 }
1443 }
1444 }
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001445 LOG(INFO) << "offset=" << offset.SizeValue();
1446}
1447
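// Assert the to-space invariant for a ref in a non-moving space: the ref must be marked in the
// corresponding bitmap or be on the allocation stack.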
1448void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj,
1449 mirror::Object* ref) {
1450  // In a non-moving space. Check that the ref is marked.
Mathieu Chartier763a31e2015-11-16 16:05:55 -08001451 if (immune_spaces_.ContainsObject(ref)) {
Nicolas Geoffrayddeb1722016-06-28 08:25:59 +00001452 accounting::ContinuousSpaceBitmap* cc_bitmap =
1453 cc_heap_bitmap_->GetContinuousSpaceBitmap(ref);
1454 CHECK(cc_bitmap != nullptr)
1455 << "An immune space ref must have a bitmap. " << ref;
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001456 if (kUseBakerReadBarrier) {
Nicolas Geoffrayddeb1722016-06-28 08:25:59 +00001457 CHECK(cc_bitmap->Test(ref))
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001458 << "Unmarked immune space ref. obj=" << obj << " rb_ptr="
Nicolas Geoffrayddeb1722016-06-28 08:25:59 +00001459 << obj->GetReadBarrierPointer() << " ref=" << ref;
1460 } else {
1461 CHECK(cc_bitmap->Test(ref))
1462 << "Unmarked immune space ref. obj=" << obj << " ref=" << ref;
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001463 }
1464 } else {
1465 accounting::ContinuousSpaceBitmap* mark_bitmap =
1466 heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
1467 accounting::LargeObjectBitmap* los_bitmap =
1468 heap_mark_bitmap_->GetLargeObjectBitmap(ref);
1469 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1470 bool is_los = mark_bitmap == nullptr;
1471 if ((!is_los && mark_bitmap->Test(ref)) ||
1472 (is_los && los_bitmap->Test(ref))) {
1473 // OK.
1474 } else {
1475      // If ref is on the allocation stack, it may not be marked
1476      // live in the bitmaps, but it is still considered marked/alive
1477      // (though not necessarily on the live stack).
1478 CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. "
1479 << "obj=" << obj << " ref=" << ref;
1480 }
1481 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001482}
1483
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001484// Used to scan ref fields of an object.
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001485class ConcurrentCopying::RefFieldsVisitor {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001486 public:
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001487 explicit RefFieldsVisitor(ConcurrentCopying* collector)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001488 : collector_(collector) {}
1489
1490 void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
Mathieu Chartier90443472015-07-16 20:32:27 -07001491 const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_)
1492 SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001493 collector_->Process(obj, offset);
1494 }
1495
1496 void operator()(mirror::Class* klass, mirror::Reference* ref) const
Mathieu Chartier90443472015-07-16 20:32:27 -07001497 SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001498 CHECK(klass->IsTypeOfReferenceClass());
1499 collector_->DelayReferenceReferent(klass, ref);
1500 }
1501
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001502 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001503 ALWAYS_INLINE
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001504 SHARED_REQUIRES(Locks::mutator_lock_) {
1505 if (!root->IsNull()) {
1506 VisitRoot(root);
1507 }
1508 }
1509
1510 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001511 ALWAYS_INLINE
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001512 SHARED_REQUIRES(Locks::mutator_lock_) {
Nicolas Geoffrayddeb1722016-06-28 08:25:59 +00001513 collector_->MarkRoot(root);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001514 }
1515
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001516 private:
1517 ConcurrentCopying* const collector_;
1518};
1519
1520// Scan ref fields of an object.
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001521inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001522 DCHECK(!region_space_->IsInFromSpace(to_ref));
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001523 RefFieldsVisitor visitor(this);
Hiroshi Yamauchi5496f692016-02-17 13:29:59 -08001524  // Disable the read barrier for performance reasons.
1525 to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
1526 visitor, visitor);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001527}
1528
1529// Process a field.
1530inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001531 mirror::Object* ref = obj->GetFieldObject<
1532 mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
Nicolas Geoffrayddeb1722016-06-28 08:25:59 +00001533 mirror::Object* to_ref = Mark(ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001534 if (to_ref == ref) {
1535 return;
1536 }
1537  // This may fail if the mutator writes to the field at the same time, but that is ok.
1538 mirror::Object* expected_ref = ref;
1539 mirror::Object* new_ref = to_ref;
1540 do {
1541 if (expected_ref !=
1542 obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) {
1543 // It was updated by the mutator.
1544 break;
1545 }
Hiroshi Yamauchifed3e2f2015-10-20 11:11:56 -07001546 } while (!obj->CasFieldWeakRelaxedObjectWithoutWriteBarrier<
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001547 false, false, kVerifyNone>(offset, expected_ref, new_ref));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001548}
1549
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001550// Process some roots.
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001551inline void ConcurrentCopying::VisitRoots(
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001552 mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
1553 for (size_t i = 0; i < count; ++i) {
1554 mirror::Object** root = roots[i];
1555 mirror::Object* ref = *root;
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001556 mirror::Object* to_ref = Mark(ref);
1557 if (to_ref == ref) {
Mathieu Chartier4809d0a2015-04-07 10:39:04 -07001558 continue;
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001559 }
1560 Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
1561 mirror::Object* expected_ref = ref;
1562 mirror::Object* new_ref = to_ref;
1563 do {
1564 if (expected_ref != addr->LoadRelaxed()) {
1565 // It was updated by the mutator.
1566 break;
1567 }
Hiroshi Yamauchifed3e2f2015-10-20 11:11:56 -07001568 } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001569 }
1570}
1571
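// Mark the object referenced by a compressed root and, if the object moved, CAS the root to
// point at the to-space copy (a failed CAS means the mutator already updated the root).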
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001572inline void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) {
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001573 DCHECK(!root->IsNull());
1574 mirror::Object* const ref = root->AsMirrorPtr();
Nicolas Geoffrayddeb1722016-06-28 08:25:59 +00001575 mirror::Object* to_ref = Mark(ref);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001576 if (to_ref != ref) {
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001577 auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
1578 auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
1579 auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001580 // If the cas fails, then it was updated by the mutator.
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001581 do {
1582 if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
1583 // It was updated by the mutator.
1584 break;
1585 }
Hiroshi Yamauchifed3e2f2015-10-20 11:11:56 -07001586 } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001587 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001588}
1589
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001590inline void ConcurrentCopying::VisitRoots(
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001591 mirror::CompressedReference<mirror::Object>** roots, size_t count,
1592 const RootInfo& info ATTRIBUTE_UNUSED) {
1593 for (size_t i = 0; i < count; ++i) {
1594 mirror::CompressedReference<mirror::Object>* const root = roots[i];
1595 if (!root->IsNull()) {
Nicolas Geoffrayddeb1722016-06-28 08:25:59 +00001596 MarkRoot(root);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001597 }
1598 }
1599}
1600
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001601// Fill the given memory block with a dummy object. Used to fill in a
1602// copy of an object that was lost in a race.
1603void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
Roland Levillain14d90572015-07-16 10:52:26 +01001604 CHECK_ALIGNED(byte_size, kObjectAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001605 memset(dummy_obj, 0, byte_size);
1606 mirror::Class* int_array_class = mirror::IntArray::GetArrayClass();
1607 CHECK(int_array_class != nullptr);
1608 AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
1609 size_t component_size = int_array_class->GetComponentSize();
1610 CHECK_EQ(component_size, sizeof(int32_t));
1611 size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
1612 if (data_offset > byte_size) {
1613    // An int array (even a zero-length one) would be too big for this block. Use java.lang.Object.
1614 mirror::Class* java_lang_Object = WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object);
1615 AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object);
1616 CHECK_EQ(byte_size, java_lang_Object->GetObjectSize());
1617 dummy_obj->SetClass(java_lang_Object);
1618 CHECK_EQ(byte_size, dummy_obj->SizeOf());
1619 } else {
1620 // Use an int array.
1621 dummy_obj->SetClass(int_array_class);
1622 CHECK(dummy_obj->IsArrayInstance());
1623 int32_t length = (byte_size - data_offset) / component_size;
1624 dummy_obj->AsArray()->SetLength(length);
1625 CHECK_EQ(dummy_obj->AsArray()->GetLength(), length)
1626 << "byte_size=" << byte_size << " length=" << length
1627 << " component_size=" << component_size << " data_offset=" << data_offset;
1628 CHECK_EQ(byte_size, dummy_obj->SizeOf())
1629 << "byte_size=" << byte_size << " length=" << length
1630 << " component_size=" << component_size << " data_offset=" << data_offset;
1631 }
1632}
1633
1634// Reuse the memory blocks that were copies of objects lost in races.
1635mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
1636 // Try to reuse the blocks that were unused due to CAS failures.
Roland Levillain14d90572015-07-16 10:52:26 +01001637 CHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001638 Thread* self = Thread::Current();
1639 size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
1640 MutexLock mu(self, skipped_blocks_lock_);
1641 auto it = skipped_blocks_map_.lower_bound(alloc_size);
1642 if (it == skipped_blocks_map_.end()) {
1643 // Not found.
1644 return nullptr;
1645 }
1646 {
1647 size_t byte_size = it->first;
1648 CHECK_GE(byte_size, alloc_size);
1649 if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
1650 // If remainder would be too small for a dummy object, retry with a larger request size.
1651 it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
1652 if (it == skipped_blocks_map_.end()) {
1653 // Not found.
1654 return nullptr;
1655 }
Roland Levillain14d90572015-07-16 10:52:26 +01001656 CHECK_ALIGNED(it->first - alloc_size, space::RegionSpace::kAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001657 CHECK_GE(it->first - alloc_size, min_object_size)
1658 << "byte_size=" << byte_size << " it->first=" << it->first << " alloc_size=" << alloc_size;
1659 }
1660 }
1661 // Found a block.
1662 CHECK(it != skipped_blocks_map_.end());
1663 size_t byte_size = it->first;
1664 uint8_t* addr = it->second;
1665 CHECK_GE(byte_size, alloc_size);
1666 CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
Roland Levillain14d90572015-07-16 10:52:26 +01001667 CHECK_ALIGNED(byte_size, space::RegionSpace::kAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001668 if (kVerboseMode) {
1669 LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
1670 }
1671 skipped_blocks_map_.erase(it);
1672 memset(addr, 0, byte_size);
1673 if (byte_size > alloc_size) {
1674 // Return the remainder to the map.
Roland Levillain14d90572015-07-16 10:52:26 +01001675 CHECK_ALIGNED(byte_size - alloc_size, space::RegionSpace::kAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001676 CHECK_GE(byte_size - alloc_size, min_object_size);
1677 FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
1678 byte_size - alloc_size);
1679 CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
1680 skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
1681 }
1682 return reinterpret_cast<mirror::Object*>(addr);
1683}
1684
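// Copy a from-space object to the to-space. Allocates a to-space copy (falling back to a
// skipped block or the non-moving space), memcpy()s the contents, and then tries to CAS the
// forwarding address into the from-space object's lock word; a lost race turns the copy into a
// dummy object that is kept for reuse.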
1685mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) {
1686 DCHECK(region_space_->IsInFromSpace(from_ref));
1687 // No read barrier to avoid nested RB that might violate the to-space
1688  // invariant. Note that from_ref is a from-space ref, so the SizeOf()
1689  // call will access the from-space meta objects; this is ok and necessary.
1690 size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
1691 size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
1692 size_t region_space_bytes_allocated = 0U;
1693 size_t non_moving_space_bytes_allocated = 0U;
1694 size_t bytes_allocated = 0U;
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001695 size_t dummy;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001696 mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001697 region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001698 bytes_allocated = region_space_bytes_allocated;
1699 if (to_ref != nullptr) {
1700 DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
1701 }
1702 bool fall_back_to_non_moving = false;
1703 if (UNLIKELY(to_ref == nullptr)) {
1704 // Failed to allocate in the region space. Try the skipped blocks.
1705 to_ref = AllocateInSkippedBlock(region_space_alloc_size);
1706 if (to_ref != nullptr) {
1707 // Succeeded to allocate in a skipped block.
1708 if (heap_->use_tlab_) {
1709 // This is necessary for the tlab case as it's not accounted in the space.
1710 region_space_->RecordAlloc(to_ref);
1711 }
1712 bytes_allocated = region_space_alloc_size;
1713 } else {
1714 // Fall back to the non-moving space.
1715 fall_back_to_non_moving = true;
1716 if (kVerboseMode) {
1717 LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
1718 << to_space_bytes_skipped_.LoadSequentiallyConsistent()
1719 << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent();
1720 }
1722 to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001723 &non_moving_space_bytes_allocated, nullptr, &dummy);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001724 CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed";
1725 bytes_allocated = non_moving_space_bytes_allocated;
1726 // Mark it in the mark bitmap.
1727 accounting::ContinuousSpaceBitmap* mark_bitmap =
1728 heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
1729 CHECK(mark_bitmap != nullptr);
1730 CHECK(!mark_bitmap->AtomicTestAndSet(to_ref));
1731 }
1732 }
1733 DCHECK(to_ref != nullptr);
1734
1735 // Attempt to install the forward pointer. This is in a loop as the
1736 // lock word atomic write can fail.
1737 while (true) {
1738 // Copy the object. TODO: copy only the lockword in the second iteration and on?
1739 memcpy(to_ref, from_ref, obj_size);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001740
1741 LockWord old_lock_word = to_ref->GetLockWord(false);
1742
1743 if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
1744 // Lost the race. Another thread (either GC or mutator) stored
1745 // the forwarding pointer first. Make the lost copy (to_ref)
1746 // look like a valid but dead (dummy) object and keep it for
1747 // future reuse.
1748 FillWithDummyObject(to_ref, bytes_allocated);
1749 if (!fall_back_to_non_moving) {
1750 DCHECK(region_space_->IsInToSpace(to_ref));
1751 if (bytes_allocated > space::RegionSpace::kRegionSize) {
1752 // Free the large alloc.
1753 region_space_->FreeLarge(to_ref, bytes_allocated);
1754 } else {
1755 // Record the lost copy for later reuse.
1756 heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated);
1757 to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated);
1758 to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1);
1759 MutexLock mu(Thread::Current(), skipped_blocks_lock_);
1760 skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
1761 reinterpret_cast<uint8_t*>(to_ref)));
1762 }
1763 } else {
1764 DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
1765 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
1766 // Free the non-moving-space chunk.
1767 accounting::ContinuousSpaceBitmap* mark_bitmap =
1768 heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
1769 CHECK(mark_bitmap != nullptr);
1770 CHECK(mark_bitmap->Clear(to_ref));
1771 heap_->non_moving_space_->Free(Thread::Current(), to_ref);
1772 }
1773
1774 // Get the winner's forward ptr.
1775 mirror::Object* lost_fwd_ptr = to_ref;
1776 to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress());
1777 CHECK(to_ref != nullptr);
1778 CHECK_NE(to_ref, lost_fwd_ptr);
1779 CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref));
1780 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
1781 return to_ref;
1782 }
1783
Hiroshi Yamauchi60f63f52015-04-23 16:12:40 -07001784 // Set the gray ptr.
1785 if (kUseBakerReadBarrier) {
1786 to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr());
1787 }
1788
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001789 LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));
1790
1791 // Try to atomically write the fwd ptr.
1792 bool success = from_ref->CasLockWordWeakSequentiallyConsistent(old_lock_word, new_lock_word);
1793 if (LIKELY(success)) {
1794 // The CAS succeeded.
1795 objects_moved_.FetchAndAddSequentiallyConsistent(1);
1796 bytes_moved_.FetchAndAddSequentiallyConsistent(region_space_alloc_size);
1797 if (LIKELY(!fall_back_to_non_moving)) {
1798 DCHECK(region_space_->IsInToSpace(to_ref));
1799 } else {
1800 DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
1801 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
1802 }
1803 if (kUseBakerReadBarrier) {
1804 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
1805 }
1806 DCHECK(GetFwdPtr(from_ref) == to_ref);
1807 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001808 PushOntoMarkStack(to_ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001809 return to_ref;
1810 } else {
1811 // The CAS failed. It may have lost the race or may have failed
1812 // due to monitor/hashcode ops. Either way, retry.
1813 }
1814 }
1815}
1816
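// Return the to-space ref for from_ref if it is already marked (this may be from_ref itself),
// or null if it is not marked yet.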
1817mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
1818 DCHECK(from_ref != nullptr);
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08001819 space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
1820 if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001821 // It's already marked.
1822 return from_ref;
1823 }
1824 mirror::Object* to_ref;
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08001825 if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001826 to_ref = GetFwdPtr(from_ref);
1827 DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) ||
1828 heap_->non_moving_space_->HasAddress(to_ref))
1829 << "from_ref=" << from_ref << " to_ref=" << to_ref;
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08001830 } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001831 if (region_space_bitmap_->Test(from_ref)) {
1832 to_ref = from_ref;
1833 } else {
1834 to_ref = nullptr;
1835 }
1836 } else {
1837 // from_ref is in a non-moving space.
Mathieu Chartier763a31e2015-11-16 16:05:55 -08001838 if (immune_spaces_.ContainsObject(from_ref)) {
Nicolas Geoffrayddeb1722016-06-28 08:25:59 +00001839 accounting::ContinuousSpaceBitmap* cc_bitmap =
1840 cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
1841 DCHECK(cc_bitmap != nullptr)
1842 << "An immune space object must have a bitmap";
1843 if (kIsDebugBuild) {
1844 DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
1845 << "Immune space object must be already marked";
1846 }
1847 if (cc_bitmap->Test(from_ref)) {
1848 // Already marked.
1849 to_ref = from_ref;
1850 } else {
1851 // Newly marked.
1852 to_ref = nullptr;
1853 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001854 } else {
1855 // Non-immune non-moving space. Use the mark bitmap.
1856 accounting::ContinuousSpaceBitmap* mark_bitmap =
1857 heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
1858 accounting::LargeObjectBitmap* los_bitmap =
1859 heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
1860 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1861 bool is_los = mark_bitmap == nullptr;
1862 if (!is_los && mark_bitmap->Test(from_ref)) {
1863 // Already marked.
1864 to_ref = from_ref;
1865 } else if (is_los && los_bitmap->Test(from_ref)) {
1866 // Already marked in LOS.
1867 to_ref = from_ref;
1868 } else {
1869 // Not marked.
1870 if (IsOnAllocStack(from_ref)) {
1871 // If on the allocation stack, it's considered marked.
1872 to_ref = from_ref;
1873 } else {
1874 // Not marked.
1875 to_ref = nullptr;
1876 }
1877 }
1878 }
1879 }
1880 return to_ref;
1881}
1882
1883bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
1884 QuasiAtomic::ThreadFenceAcquire();
1885 accounting::ObjectStack* alloc_stack = GetAllocationStack();
Mathieu Chartiercb535da2015-01-23 13:50:03 -08001886 return alloc_stack->Contains(ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001887}
1888
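// Mark a ref in a non-moving space, where from_ref == to_ref. Sets the mark bit in the
// appropriate bitmap (and, for the Baker read barrier, the read barrier pointer), treating
// objects on the allocation stack as already marked.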
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001889mirror::Object* ConcurrentCopying::MarkNonMoving(mirror::Object* ref) {
1890 // ref is in a non-moving space (from_ref == to_ref).
1891 DCHECK(!region_space_->HasAddress(ref)) << ref;
Nicolas Geoffrayddeb1722016-06-28 08:25:59 +00001892 if (immune_spaces_.ContainsObject(ref)) {
1893 accounting::ContinuousSpaceBitmap* cc_bitmap =
1894 cc_heap_bitmap_->GetContinuousSpaceBitmap(ref);
1895 DCHECK(cc_bitmap != nullptr)
1896 << "An immune space object must have a bitmap";
1897 if (kIsDebugBuild) {
1898 DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(ref)->Test(ref))
1899 << "Immune space object must be already marked";
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001900 }
Nicolas Geoffrayddeb1722016-06-28 08:25:59 +00001901 MarkUnevacFromSpaceRegionOrImmuneSpace(ref, cc_bitmap);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001902 } else {
Nicolas Geoffrayddeb1722016-06-28 08:25:59 +00001903 // Use the mark bitmap.
1904 accounting::ContinuousSpaceBitmap* mark_bitmap =
1905 heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
1906 accounting::LargeObjectBitmap* los_bitmap =
1907 heap_mark_bitmap_->GetLargeObjectBitmap(ref);
1908 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1909 bool is_los = mark_bitmap == nullptr;
1910 if (!is_los && mark_bitmap->Test(ref)) {
1911 // Already marked.
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001912 if (kUseBakerReadBarrier) {
Nicolas Geoffrayddeb1722016-06-28 08:25:59 +00001913 DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
1914 ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
1915 }
1916 } else if (is_los && los_bitmap->Test(ref)) {
1917 // Already marked in LOS.
1918 if (kUseBakerReadBarrier) {
1919 DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
1920 ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001921 }
1922 } else {
Nicolas Geoffrayddeb1722016-06-28 08:25:59 +00001923 // Not marked.
1924 if (IsOnAllocStack(ref)) {
1925 // If it's on the allocation stack, it's considered marked. Keep it white.
1926 // Objects on the allocation stack need not be marked.
1927 if (!is_los) {
1928 DCHECK(!mark_bitmap->Test(ref));
1929 } else {
1930 DCHECK(!los_bitmap->Test(ref));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001931 }
Nicolas Geoffrayddeb1722016-06-28 08:25:59 +00001932 if (kUseBakerReadBarrier) {
1933 DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr());
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001934 }
1935 } else {
Nicolas Geoffrayddeb1722016-06-28 08:25:59 +00001936 // For the baker-style RB, we need to handle 'false-gray' cases. See the
1937 // kRegionTypeUnevacFromSpace-case comment in Mark().
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001938 if (kUseBakerReadBarrier) {
Nicolas Geoffrayddeb1722016-06-28 08:25:59 +00001939 // Test the bitmap first to reduce the chance of false gray cases.
1940 if ((!is_los && mark_bitmap->Test(ref)) ||
1941 (is_los && los_bitmap->Test(ref))) {
1942 return ref;
1943 }
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001944 }
Nicolas Geoffrayddeb1722016-06-28 08:25:59 +00001945        // Neither marked nor on the allocation stack. Try to mark it.
1946 // This may or may not succeed, which is ok.
1947 bool cas_success = false;
1948 if (kUseBakerReadBarrier) {
1949 cas_success = ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(),
1950 ReadBarrier::GrayPtr());
1951 }
1952 if (!is_los && mark_bitmap->AtomicTestAndSet(ref)) {
1953 // Already marked.
1954 if (kUseBakerReadBarrier && cas_success &&
1955 ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
1956 PushOntoFalseGrayStack(ref);
1957 }
1958 } else if (is_los && los_bitmap->AtomicTestAndSet(ref)) {
1959 // Already marked in LOS.
1960 if (kUseBakerReadBarrier && cas_success &&
1961 ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
1962 PushOntoFalseGrayStack(ref);
1963 }
1964 } else {
1965 // Newly marked.
1966 if (kUseBakerReadBarrier) {
1967 DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
1968 }
1969 PushOntoMarkStack(ref);
1970 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001971 }
1972 }
1973 }
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001974 return ref;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001975}
1976
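// Per-GC cleanup: check that the mark stack pool is back to its full size, drop the cached
// region space pointer, clear the skipped block map, and clear the marked objects in the heap.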
1977void ConcurrentCopying::FinishPhase() {
Mathieu Chartiera9d82fe2016-01-25 20:06:11 -08001978 Thread* const self = Thread::Current();
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001979 {
Mathieu Chartiera9d82fe2016-01-25 20:06:11 -08001980 MutexLock mu(self, mark_stack_lock_);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001981 CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
1982 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001983 region_space_ = nullptr;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001984 {
1985 MutexLock mu(Thread::Current(), skipped_blocks_lock_);
1986 skipped_blocks_map_.clear();
1987 }
Mathieu Chartiera9d82fe2016-01-25 20:06:11 -08001988 ReaderMutexLock mu(self, *Locks::mutator_lock_);
1989 WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001990 heap_->ClearMarkedObjects();
1991}
1992
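// If the object that the field references is marked, update the field to point at the to-space
// copy and return true; otherwise return false.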
Mathieu Chartier97509952015-07-13 14:35:43 -07001993bool ConcurrentCopying::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001994 mirror::Object* from_ref = field->AsMirrorPtr();
Mathieu Chartier97509952015-07-13 14:35:43 -07001995 mirror::Object* to_ref = IsMarked(from_ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001996 if (to_ref == nullptr) {
1997 return false;
1998 }
1999 if (from_ref != to_ref) {
2000 QuasiAtomic::ThreadFenceRelease();
2001 field->Assign(to_ref);
2002 QuasiAtomic::ThreadFenceSequentiallyConsistent();
2003 }
2004 return true;
2005}
2006
Mathieu Chartier97509952015-07-13 14:35:43 -07002007mirror::Object* ConcurrentCopying::MarkObject(mirror::Object* from_ref) {
2008 return Mark(from_ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002009}
2010
2011void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
Mathieu Chartier97509952015-07-13 14:35:43 -07002012 heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002013}
2014
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07002015void ConcurrentCopying::ProcessReferences(Thread* self) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002016 TimingLogger::ScopedTiming split("ProcessReferences", GetTimings());
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07002017  // We don't really need to hold the heap bitmap lock as we use CAS to mark in bitmaps.
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002018 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
2019 GetHeap()->GetReferenceProcessor()->ProcessReferences(
Mathieu Chartier97509952015-07-13 14:35:43 -07002020 true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002021}
2022
2023void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
2024 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
2025 region_space_->RevokeAllThreadLocalBuffers();
2026}
2027
Hiroshi Yamauchid5307ec2014-03-27 21:07:51 -07002028} // namespace collector
2029} // namespace gc
2030} // namespace art