/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "concurrent_copying.h"

#include "art_field-inl.h"
#include "base/enums.h"
#include "base/histogram-inl.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "debugger.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
#include "image-inl.h"
#include "intern_table.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"

namespace art {
namespace gc {
namespace collector {

static constexpr size_t kDefaultGcMarkStackSize = 2 * MB;
// If kFilterModUnionCards then we attempt to filter cards that don't need to be dirty in the mod
// union table. Disabled since it does not seem to help the pause much.
static constexpr bool kFilterModUnionCards = kIsDebugBuild;
// If kDisallowReadBarrierDuringScan is true then the GC aborts if any read barriers occur during
// ConcurrentCopying::Scan. May be used to diagnose possibly unnecessary read barriers.
// Only enabled for kIsDebugBuild to avoid the performance hit.
static constexpr bool kDisallowReadBarrierDuringScan = kIsDebugBuild;
// Slow path mark stack size; increase this if the stack is getting full and it is causing
// performance problems.
static constexpr size_t kReadBarrierMarkStackSize = 512 * KB;

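// Color protocol used by the Baker read barrier logic below: a "gray" object is one whose fields
// may still contain from-space references and therefore must be read through the slow path, while
// a "white" object has been scanned (or is unmarked). For instance, ImmuneSpaceScanObjVisitor
// below scans a gray immune object and then atomically changes it back from gray to white.
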
ConcurrentCopying::ConcurrentCopying(Heap* heap,
                                     const std::string& name_prefix,
                                     bool measure_read_barrier_slow_path)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       "concurrent copying + mark sweep"),
      region_space_(nullptr), gc_barrier_(new Barrier(0)),
      gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack",
                                                     kDefaultGcMarkStackSize,
                                                     kDefaultGcMarkStackSize)),
      rb_mark_bit_stack_(accounting::ObjectStack::Create("rb copying gc mark stack",
                                                         kReadBarrierMarkStackSize,
                                                         kReadBarrierMarkStackSize)),
      rb_mark_bit_stack_full_(false),
      mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
      thread_running_gc_(nullptr),
      is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
      region_space_bitmap_(nullptr),
      heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0), mark_stack_mode_(kMarkStackModeOff),
      weak_ref_access_enabled_(true),
      skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
      measure_read_barrier_slow_path_(measure_read_barrier_slow_path),
      rb_slow_path_ns_(0),
      rb_slow_path_count_(0),
      rb_slow_path_count_gc_(0),
      rb_slow_path_histogram_lock_("Read barrier histogram lock"),
      rb_slow_path_time_histogram_("Mutator time in read barrier slow path", 500, 32),
      rb_slow_path_count_total_(0),
      rb_slow_path_count_gc_total_(0),
      rb_table_(heap_->GetReadBarrierTable()),
      force_evacuate_all_(false),
      immune_gray_stack_lock_("concurrent copying immune gray stack lock",
                              kMarkSweepMarkStackLock) {
  static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
                "The region space size and the read barrier table region size must match");
  Thread* self = Thread::Current();
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Cache this so that we won't have to lock heap_bitmap_lock_ in
    // Mark() which could cause a nested lock on heap_bitmap_lock_
    // when GC causes a RB while doing GC or a lock order violation
    // (class_linker_lock_ and heap_bitmap_lock_).
    heap_mark_bitmap_ = heap->GetMarkBitmap();
  }
  {
    MutexLock mu(self, mark_stack_lock_);
    for (size_t i = 0; i < kMarkStackPoolSize; ++i) {
      accounting::AtomicStack<mirror::Object>* mark_stack =
          accounting::AtomicStack<mirror::Object>::Create(
              "thread local mark stack", kMarkStackSize, kMarkStackSize);
      pooled_mark_stacks_.push_back(mark_stack);
    }
  }
}

void ConcurrentCopying::MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) {
  // Used for preserving soft references. It should be OK to not have a CAS here since there should
  // be no other threads which can trigger read barriers on the same referent during reference
  // processing.
  from_ref->Assign(Mark(from_ref->AsMirrorPtr()));
  DCHECK(!from_ref->IsNull());
}

ConcurrentCopying::~ConcurrentCopying() {
  STLDeleteElements(&pooled_mark_stacks_);
}

void ConcurrentCopying::RunPhases() {
  CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier);
  CHECK(!is_active_);
  is_active_ = true;
  Thread* self = Thread::Current();
  thread_running_gc_ = self;
  Locks::mutator_lock_->AssertNotHeld(self);
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    InitializePhase();
  }
  FlipThreadRoots();
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    MarkingPhase();
  }
  // Verify no from space refs. This causes a pause.
  if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) {
    TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
    ScopedPause pause(this);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "Verifying no from-space refs";
    }
    VerifyNoFromSpaceReferences();
    if (kVerboseMode) {
      LOG(INFO) << "Done verifying no from-space refs";
    }
    CheckEmptyMarkStack();
  }
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  FinishPhase();
  CHECK(is_active_);
  is_active_ = false;
  thread_running_gc_ = nullptr;
}

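// Record the spaces we never collect as immune and create the mark bitmap for the region space.
// Called from InitializePhase() at the start of each GC cycle.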
void ConcurrentCopying::BindBitmaps() {
  Thread* self = Thread::Current();
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      CHECK(space->IsZygoteSpace() || space->IsImageSpace());
      immune_spaces_.AddSpace(space);
    } else if (space == region_space_) {
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create("cc region space bitmap",
                                                    space->Begin(), space->Capacity());
      region_space_bitmap_ = bitmap;
    }
  }
}

void ConcurrentCopying::InitializePhase() {
  TimingLogger::ScopedTiming split("InitializePhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC InitializePhase";
    LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-"
              << reinterpret_cast<void*>(region_space_->Limit());
  }
  CheckEmptyMarkStack();
  if (kIsDebugBuild) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    CHECK(false_gray_stack_.empty());
  }

  rb_mark_bit_stack_full_ = false;
  mark_from_read_barrier_measurements_ = measure_read_barrier_slow_path_;
  if (measure_read_barrier_slow_path_) {
    rb_slow_path_ns_.StoreRelaxed(0);
    rb_slow_path_count_.StoreRelaxed(0);
    rb_slow_path_count_gc_.StoreRelaxed(0);
  }

  immune_spaces_.Reset();
  bytes_moved_.StoreRelaxed(0);
  objects_moved_.StoreRelaxed(0);
  if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
      GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
      GetCurrentIteration()->GetClearSoftReferences()) {
    force_evacuate_all_ = true;
  } else {
    force_evacuate_all_ = false;
  }
  if (kUseBakerReadBarrier) {
    updated_all_immune_objects_.StoreRelaxed(false);
    // GC may gray immune objects in the thread flip.
    gc_grays_immune_objects_ = true;
    if (kIsDebugBuild) {
      MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
      DCHECK(immune_gray_stack_.empty());
    }
  }
  BindBitmaps();
  if (kVerboseMode) {
    LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_;
    LOG(INFO) << "Largest immune region: " << immune_spaces_.GetLargestImmuneRegion().Begin()
              << "-" << immune_spaces_.GetLargestImmuneRegion().End();
    for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
      LOG(INFO) << "Immune space: " << *space;
    }
    LOG(INFO) << "GC end of InitializePhase";
  }
  // Mark all of the zygote large objects without graying them.
  MarkZygoteLargeObjects();
}

// Used to switch the thread roots of a thread from from-space refs to to-space refs.
class ConcurrentCopying::ThreadFlipVisitor : public Closure, public RootVisitor {
 public:
  ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
      : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
  }

  virtual void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->SetIsGcMarking(true);
    if (use_tlab_ && thread->HasTlab()) {
      if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
        // This must come before the revoke.
        size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
        reinterpret_cast<Atomic<size_t>*>(&concurrent_copying_->from_space_num_objects_at_first_pause_)->
            FetchAndAddSequentiallyConsistent(thread_local_objects);
      } else {
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
      }
    }
    if (kUseThreadLocalAllocationStack) {
      thread->RevokeThreadLocalAllocationStack();
    }
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // We can use the non-CAS VisitRoots functions below because we update thread-local GC roots
    // only.
    thread->VisitRoots(this);
    concurrent_copying_->GetBarrier().Pass(self);
  }

  void VisitRoots(mirror::Object*** roots,
                  size_t count,
                  const RootInfo& info ATTRIBUTE_UNUSED)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    for (size_t i = 0; i < count; ++i) {
      mirror::Object** root = roots[i];
      mirror::Object* ref = *root;
      if (ref != nullptr) {
        mirror::Object* to_ref = concurrent_copying_->Mark(ref);
        if (to_ref != ref) {
          *root = to_ref;
        }
      }
    }
  }

  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                  size_t count,
                  const RootInfo& info ATTRIBUTE_UNUSED)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    for (size_t i = 0; i < count; ++i) {
      mirror::CompressedReference<mirror::Object>* const root = roots[i];
      if (!root->IsNull()) {
        mirror::Object* ref = root->AsMirrorPtr();
        mirror::Object* to_ref = concurrent_copying_->Mark(ref);
        if (to_ref != ref) {
          root->Assign(to_ref);
        }
      }
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool use_tlab_;
};

// Called back from Runtime::FlipThreadRoots() during a pause.
class ConcurrentCopying::FlipCallback : public Closure {
 public:
  explicit FlipCallback(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE REQUIRES(Locks::mutator_lock_) {
    ConcurrentCopying* cc = concurrent_copying_;
    TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self);
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
    cc->SwapStacks();
    if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
      cc->RecordLiveStackFreezeSize(self);
      cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated();
      cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
    }
    cc->is_marking_ = true;
    cc->mark_stack_mode_.StoreRelaxed(ConcurrentCopying::kMarkStackModeThreadLocal);
    if (kIsDebugBuild) {
      cc->region_space_->AssertAllRegionLiveBytesZeroOrCleared();
    }
    if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
      CHECK(Runtime::Current()->IsAotCompiler());
      TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
      Runtime::Current()->VisitTransactionRoots(cc);
    }
    if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
      cc->GrayAllDirtyImmuneObjects();
      if (kIsDebugBuild) {
        // Check that all non-gray immune objects only reference immune objects.
        cc->VerifyGrayImmuneObjects();
      }
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

class ConcurrentCopying::VerifyGrayImmuneObjectsVisitor {
 public:
  explicit VerifyGrayImmuneObjectsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
      const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_)
      SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
    CheckReference(obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset),
                   obj, offset);
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    CheckReference(ref->GetReferent<kWithoutReadBarrier>(),
                   ref,
                   mirror::Reference::ReferentOffset());
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      ALWAYS_INLINE
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      ALWAYS_INLINE
      SHARED_REQUIRES(Locks::mutator_lock_) {
    CheckReference(root->AsMirrorPtr(), nullptr, MemberOffset(0));
  }

 private:
  ConcurrentCopying* const collector_;

  void CheckReference(mirror::Object* ref, mirror::Object* holder, MemberOffset offset) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (ref != nullptr) {
      if (!collector_->immune_spaces_.ContainsObject(ref)) {
        // Not immune, must be a zygote large object.
        CHECK(Runtime::Current()->GetHeap()->GetLargeObjectsSpace()->IsZygoteLargeObject(
            Thread::Current(), ref))
            << "Non-gray object references non-immune, non-zygote large object " << ref << " "
            << PrettyTypeOf(ref) << " in holder " << holder << " " << PrettyTypeOf(holder)
            << " offset=" << offset.Uint32Value();
      } else {
        // Make sure the large object class is immune since we will never scan the large object.
        CHECK(collector_->immune_spaces_.ContainsObject(
            ref->GetClass<kVerifyNone, kWithoutReadBarrier>()));
      }
    }
  }
};

void ConcurrentCopying::VerifyGrayImmuneObjects() {
  TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
  for (auto& space : immune_spaces_.GetSpaces()) {
    DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
    accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
    VerifyGrayImmuneObjectsVisitor visitor(this);
    live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                  reinterpret_cast<uintptr_t>(space->Limit()),
                                  [&visitor](mirror::Object* obj)
        SHARED_REQUIRES(Locks::mutator_lock_) {
      // If an object is not gray, it should only have references to things in the immune spaces.
      if (obj->GetReadBarrierPointer() != ReadBarrier::GrayPtr()) {
        obj->VisitReferences</*kVisitNativeRoots*/true,
                             kDefaultVerifyFlags,
                             kWithoutReadBarrier>(visitor, visitor);
      }
    });
  }
}

// Switch thread roots from from-space refs to to-space refs. Forward/mark the thread roots.
void ConcurrentCopying::FlipThreadRoots() {
  TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
  }
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  gc_barrier_->Init(self, 0);
  ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
  FlipCallback flip_callback(this);
  size_t barrier_count = Runtime::Current()->FlipThreadRoots(
      &thread_flip_visitor, &flip_callback, this);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  is_asserting_to_space_invariant_ = true;
  QuasiAtomic::ThreadFenceForConstructor();
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
    LOG(INFO) << "GC end of FlipThreadRoots";
  }
}

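// Grays an immune-space object in place so that mutator read barriers will treat it as
// not-yet-scanned; used via GrayAllDirtyImmuneObjects() during the flip pause.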
class ConcurrentCopying::GrayImmuneObjectVisitor {
 public:
  explicit GrayImmuneObjectVisitor() {}

  ALWAYS_INLINE void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) {
    if (kUseBakerReadBarrier) {
      if (kIsDebugBuild) {
        Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
      }
      obj->SetReadBarrierPointer(ReadBarrier::GrayPtr());
    }
  }

  static void Callback(mirror::Object* obj, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) {
    reinterpret_cast<GrayImmuneObjectVisitor*>(arg)->operator()(obj);
  }
};

472 TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
473 gc::Heap* const heap = Runtime::Current()->GetHeap();
474 accounting::CardTable* const card_table = heap->GetCardTable();
475 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
476 for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
477 DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
478 GrayImmuneObjectVisitor visitor;
479 accounting::ModUnionTable* table = heap->FindModUnionTableFromSpace(space);
480 // Mark all the objects on dirty cards since these may point to objects in other space.
481 // Once these are marked, the GC will eventually clear them later.
482 // Table is non null for boot image and zygote spaces. It is only null for application image
483 // spaces.
484 if (table != nullptr) {
485 // TODO: Add preclean outside the pause.
486 table->ClearCards();
487 table->VisitObjects(GrayImmuneObjectVisitor::Callback, &visitor);
488 } else {
489 // TODO: Consider having a mark bitmap for app image spaces and avoid scanning during the
490 // pause because app image spaces are all dirty pages anyways.
491 card_table->Scan<false>(space->GetMarkBitmap(), space->Begin(), space->End(), visitor);
492 }
493 }
494 // Since all of the objects that may point to other spaces are marked, we can avoid all the read
495 // barriers in the immune spaces.
496 updated_all_immune_objects_.StoreRelaxed(true);
497}
498
void ConcurrentCopying::SwapStacks() {
  heap_->SwapStacks();
}

void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
}

class EmptyCheckpoint : public Closure {
 public:
  explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Used to visit objects in the immune spaces.
inline void ConcurrentCopying::ScanImmuneObject(mirror::Object* obj) {
  DCHECK(obj != nullptr);
  DCHECK(immune_spaces_.ContainsObject(obj));
  // Update the fields without graying it or pushing it onto the mark stack.
  Scan(obj);
}

class ConcurrentCopying::ImmuneSpaceScanObjVisitor {
 public:
  explicit ImmuneSpaceScanObjVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}

  ALWAYS_INLINE void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) {
    if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
      if (obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
        collector_->ScanImmuneObject(obj);
        // Done scanning the object, go back to white.
        bool success = obj->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                        ReadBarrier::WhitePtr());
        CHECK(success);
      }
    } else {
      collector_->ScanImmuneObject(obj);
    }
  }

  static void Callback(mirror::Object* obj, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) {
    reinterpret_cast<ImmuneSpaceScanObjVisitor*>(arg)->operator()(obj);
  }

 private:
  ConcurrentCopying* const collector_;
};

// Concurrently mark roots that are guarded by read barriers and process the mark stack.
void ConcurrentCopying::MarkingPhase() {
  TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC MarkingPhase";
  }
  CHECK(weak_ref_access_enabled_);

  // Scan immune spaces.
  // Update all the fields in the immune spaces first without graying the objects so that we
  // minimize dirty pages in the immune spaces. Note mutators can concurrently access and gray some
  // of the objects.
  if (kUseBakerReadBarrier) {
    gc_grays_immune_objects_ = false;
  }
  {
    TimingLogger::ScopedTiming split2("ScanImmuneSpaces", GetTimings());
    for (auto& space : immune_spaces_.GetSpaces()) {
      DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
      accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      ImmuneSpaceScanObjVisitor visitor(this);
      if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects && table != nullptr) {
        table->VisitObjects(ImmuneSpaceScanObjVisitor::Callback, &visitor);
      } else {
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->Limit()),
                                      visitor);
      }
    }
  }
  if (kUseBakerReadBarrier) {
    // This release fence makes the field updates in the above loop visible before allowing
    // mutators to get access to immune objects without graying them first.
    updated_all_immune_objects_.StoreRelease(true);
    // Now whiten immune objects concurrently accessed and grayed by mutators. We can't do this in
    // the above loop because we would incorrectly disable the read barrier by whitening an object
    // which may point to an unscanned, white object, breaking the to-space invariant.
    //
    // Make sure no mutators are in the middle of marking an immune object before whitening immune
    // objects.
    IssueEmptyCheckpoint();
    MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
    if (kVerboseMode) {
      LOG(INFO) << "immune gray stack size=" << immune_gray_stack_.size();
    }
    for (mirror::Object* obj : immune_gray_stack_) {
      DCHECK(obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      bool success = obj->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                      ReadBarrier::WhitePtr());
      DCHECK(success);
    }
    immune_gray_stack_.clear();
  }

  {
    TimingLogger::ScopedTiming split2("VisitConcurrentRoots", GetTimings());
    Runtime::Current()->VisitConcurrentRoots(this, kVisitRootFlagAllRoots);
  }
  {
    // TODO: don't visit the transaction roots if it's not active.
    TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
    Runtime::Current()->VisitNonThreadRoots(this);
  }

  Thread* self = Thread::Current();
  {
    TimingLogger::ScopedTiming split7("ProcessMarkStack", GetTimings());
    // We transition through three mark stack modes (thread-local, shared, GC-exclusive). The
    // primary reasons are that we need to use a checkpoint to process thread-local mark stacks,
    // but after we disable weak ref accesses, we can't use a checkpoint due to a deadlock issue
    // (running threads can be blocked at WaitHoldingLocks), and that once we reach the point
    // where we process weak references, we can avoid using a lock when accessing the GC mark
    // stack, which makes mark stack processing more efficient.

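    // Concretely, the mode transitions below are:
    //   kMarkStackModeThreadLocal -> kMarkStackModeShared (SwitchToSharedMarkStackMode())
    //     -> kMarkStackModeGcExclusive (SwitchToGcExclusiveMarkStackMode())
    // and finally kMarkStackModeOff once DisableMarking() is called.
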
    // Process the mark stack once in the thread local stack mode. This marks most of the live
    // objects, aside from weak ref accesses with read barriers (Reference::GetReferent() and
    // system weaks) that may happen concurrently while we process the mark stack and newly
    // mark/gray objects and push refs onto the mark stack.
    ProcessMarkStack();
    // Switch to the shared mark stack mode. That is, revoke and process thread-local mark stacks
    // for the last time before transitioning to the shared mark stack mode, which would process
    // new refs that may have been concurrently pushed onto the mark stack during the
    // ProcessMarkStack() call above. At the same time, disable weak ref accesses using a
    // per-thread flag. It's important to do these together in a single checkpoint so that we can
    // ensure that mutators won't newly gray objects and push new refs onto the mark stack due to
    // weak ref accesses and mutators safely transition to the shared mark stack mode (without
    // leaving unprocessed refs on the thread-local mark stacks), without a race. This is why we
    // use a thread-local weak ref access flag Thread::tls32_.weak_ref_access_enabled_ instead of
    // the global ones.
    SwitchToSharedMarkStackMode();
    CHECK(!self->GetWeakRefAccessEnabled());
    // Now that weak refs accesses are disabled, once we exhaust the shared mark stack again here
    // (which may be non-empty if there were refs found on thread-local mark stacks during the
    // above SwitchToSharedMarkStackMode() call), we won't have new refs to process, that is,
    // mutators (via read barriers) have no way to produce any more refs to process. Marking
    // converges once before we process weak refs below.
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Switch to the GC exclusive mark stack mode so that we can process the mark stack without a
    // lock from this point on.
    SwitchToGcExclusiveMarkStackMode();
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "ProcessReferences";
    }
    // Process weak references. This may produce new refs to process and have them processed via
    // ProcessMarkStack (in the GC exclusive mark stack mode).
    ProcessReferences(self);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks";
    }
    SweepSystemWeaks(self);
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks done";
    }
    // Process the mark stack here one last time because the above SweepSystemWeaks() call may have
    // marked some objects (strings alive) as hash_set::Erase() can call the hash function for
    // arbitrary elements in the weak intern table in InternTable::Table::SweepWeaks().
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Re-enable weak ref accesses.
    ReenableWeakRefAccess(self);
    // Free data for class loaders that we unloaded.
    Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
    // Marking is done. Disable marking.
    DisableMarking();
    if (kUseBakerReadBarrier) {
      ProcessFalseGrayStack();
    }
    CheckEmptyMarkStack();
  }

  CHECK(weak_ref_access_enabled_);
  if (kVerboseMode) {
    LOG(INFO) << "GC end of MarkingPhase";
  }
}

void ConcurrentCopying::ReenableWeakRefAccess(Thread* self) {
  if (kVerboseMode) {
    LOG(INFO) << "ReenableWeakRefAccess";
  }
  weak_ref_access_enabled_.StoreRelaxed(true);  // This is for new threads.
  QuasiAtomic::ThreadFenceForConstructor();
  // Iterate all threads (don't need to or can't use a checkpoint) and re-enable weak ref access.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      thread->SetWeakRefAccessEnabled(true);
    }
  }
  // Unblock blocking threads.
  GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
  Runtime::Current()->BroadcastForNewSystemWeaks();
}

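// Checkpoint that clears each thread's local is_gc_marking flag once marking is finished, so
// that read barriers return to their fast path.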
class ConcurrentCopying::DisableMarkingCheckpoint : public Closure {
 public:
  explicit DisableMarkingCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // Disable the thread-local is_gc_marking flag.
    // Note a thread that has just started right before this checkpoint may already have this flag
    // set to false, which is ok.
    thread->SetIsGcMarking(false);
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

void ConcurrentCopying::IssueDisableMarkingCheckpoint() {
  Thread* self = Thread::Current();
  DisableMarkingCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions are
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

void ConcurrentCopying::DisableMarking() {
  // Change the global is_marking flag to false. Do a fence before doing a checkpoint to update the
  // thread-local flags so that a new thread starting up will get the correct is_marking flag.
  is_marking_ = false;
  QuasiAtomic::ThreadFenceForConstructor();
  // Use a checkpoint to turn off the thread-local is_gc_marking flags and to ensure no threads are
  // still in the middle of a read barrier which may have a from-space ref cached in a local
  // variable.
  IssueDisableMarkingCheckpoint();
  if (kUseTableLookupReadBarrier) {
    heap_->rb_table_->ClearAll();
    DCHECK(heap_->rb_table_->IsAllCleared());
  }
  is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(1);
  mark_stack_mode_.StoreSequentiallyConsistent(kMarkStackModeOff);
}

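// Record an object that was unnecessarily grayed (a "false gray" object) so that
// ProcessFalseGrayStack() can change it back to white at the end of marking.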
void ConcurrentCopying::PushOntoFalseGrayStack(mirror::Object* ref) {
  CHECK(kUseBakerReadBarrier);
  DCHECK(ref != nullptr);
  MutexLock mu(Thread::Current(), mark_stack_lock_);
  false_gray_stack_.push_back(ref);
}

void ConcurrentCopying::ProcessFalseGrayStack() {
  CHECK(kUseBakerReadBarrier);
  // Change the objects on the false gray stack from gray to white.
  MutexLock mu(Thread::Current(), mark_stack_lock_);
  for (mirror::Object* obj : false_gray_stack_) {
    DCHECK(IsMarked(obj));
    // The object could be white here if a thread got preempted after a success at the
    // AtomicSetReadBarrierPointer in Mark(), GC started marking through it (but not finished so
    // still gray), and the thread ran to register it onto the false gray stack.
    if (obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
      bool success = obj->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                      ReadBarrier::WhitePtr());
      DCHECK(success);
    }
  }
  false_gray_stack_.clear();
}

void ConcurrentCopying::IssueEmptyCheckpoint() {
  Thread* self = Thread::Current();
  EmptyCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions are
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

void ConcurrentCopying::ExpandGcMarkStack() {
  DCHECK(gc_mark_stack_->IsFull());
  const size_t new_size = gc_mark_stack_->Capacity() * 2;
  std::vector<StackReference<mirror::Object>> temp(gc_mark_stack_->Begin(),
                                                   gc_mark_stack_->End());
  gc_mark_stack_->Resize(new_size);
  for (auto& ref : temp) {
    gc_mark_stack_->PushBack(ref.AsMirrorPtr());
  }
  DCHECK(!gc_mark_stack_->IsFull());
}

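// Push a newly marked reference onto the mark stack that matches the current mark stack mode:
// in thread-local mode the GC thread uses gc_mark_stack_ while mutators use pooled thread-local
// stacks; in shared mode all threads use gc_mark_stack_ under mark_stack_lock_; in GC-exclusive
// mode only the GC thread may push, so gc_mark_stack_ is accessed without a lock.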
void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
  CHECK_EQ(is_mark_stack_push_disallowed_.LoadRelaxed(), 0)
      << " " << to_ref << " " << PrettyTypeOf(to_ref);
  Thread* self = Thread::Current();  // TODO: pass self as an argument from call sites?
  CHECK(thread_running_gc_ != nullptr);
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (LIKELY(mark_stack_mode == kMarkStackModeThreadLocal)) {
    if (LIKELY(self == thread_running_gc_)) {
      // If GC-running thread, use the GC mark stack instead of a thread-local mark stack.
      CHECK(self->GetThreadLocalMarkStack() == nullptr);
      if (UNLIKELY(gc_mark_stack_->IsFull())) {
        ExpandGcMarkStack();
      }
      gc_mark_stack_->PushBack(to_ref);
    } else {
      // Otherwise, use a thread-local mark stack.
      accounting::AtomicStack<mirror::Object>* tl_mark_stack = self->GetThreadLocalMarkStack();
      if (UNLIKELY(tl_mark_stack == nullptr || tl_mark_stack->IsFull())) {
        MutexLock mu(self, mark_stack_lock_);
        // Get a new thread local mark stack.
        accounting::AtomicStack<mirror::Object>* new_tl_mark_stack;
        if (!pooled_mark_stacks_.empty()) {
          // Use a pooled mark stack.
          new_tl_mark_stack = pooled_mark_stacks_.back();
          pooled_mark_stacks_.pop_back();
        } else {
          // None pooled. Create a new one.
          new_tl_mark_stack =
              accounting::AtomicStack<mirror::Object>::Create(
                  "thread local mark stack", 4 * KB, 4 * KB);
        }
        DCHECK(new_tl_mark_stack != nullptr);
        DCHECK(new_tl_mark_stack->IsEmpty());
        new_tl_mark_stack->PushBack(to_ref);
        self->SetThreadLocalMarkStack(new_tl_mark_stack);
        if (tl_mark_stack != nullptr) {
          // Store the old full stack into a vector.
          revoked_mark_stacks_.push_back(tl_mark_stack);
        }
      } else {
        tl_mark_stack->PushBack(to_ref);
      }
    }
  } else if (mark_stack_mode == kMarkStackModeShared) {
    // Access the shared GC mark stack with a lock.
    MutexLock mu(self, mark_stack_lock_);
    if (UNLIKELY(gc_mark_stack_->IsFull())) {
      ExpandGcMarkStack();
    }
    gc_mark_stack_->PushBack(to_ref);
  } else {
    CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
             static_cast<uint32_t>(kMarkStackModeGcExclusive))
        << "ref=" << to_ref
        << " self->gc_marking=" << self->GetIsGcMarking()
        << " cc->is_marking=" << is_marking_;
    CHECK(self == thread_running_gc_)
        << "Only GC-running thread should access the mark stack "
        << "in the GC exclusive mark stack mode";
    // Access the GC mark stack without a lock.
    if (UNLIKELY(gc_mark_stack_->IsFull())) {
      ExpandGcMarkStack();
    }
    gc_mark_stack_->PushBack(to_ref);
  }
}

accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() {
  return heap_->allocation_stack_.get();
}

accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
  return heap_->live_stack_.get();
}

// The following visitors are used to verify that there are no references to the from-space left
// after marking.
class ConcurrentCopying::VerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
 public:
  explicit VerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    if (kUseBakerReadBarrier) {
      CHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr())
          << "Ref " << ref << " " << PrettyTypeOf(ref)
          << " has non-white rb_ptr";
    }
  }

  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    DCHECK(root != nullptr);
    operator()(root);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopying::VerifyNoFromSpaceRefsFieldVisitor {
 public:
  explicit VerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    VerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    this->operator()(ref, mirror::Reference::ReferentOffset(), false);
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    VerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(root->AsMirrorPtr());
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopying::VerifyNoFromSpaceRefsObjectVisitor {
 public:
  explicit VerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void *arg)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    VerifyNoFromSpaceRefsFieldVisitor visitor(collector);
    obj->VisitReferences(visitor, visitor);
    if (kUseBakerReadBarrier) {
      CHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr())
          << "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer();
    }
  }

 private:
  ConcurrentCopying* const collector_;
};

// Verify there are no from-space references left after the marking phase.
void ConcurrentCopying::VerifyNoFromSpaceReferences() {
  Thread* self = Thread::Current();
  DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
  // Verify all threads have is_gc_marking set to false.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      CHECK(!thread->GetIsGcMarking());
    }
  }
  VerifyNoFromSpaceRefsObjectVisitor visitor(this);
  // Roots.
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    VerifyNoFromSpaceRefsVisitor ref_visitor(this);
    Runtime::Current()->VisitRoots(&ref_visitor);
  }
  // The to-space.
  region_space_->WalkToSpace(VerifyNoFromSpaceRefsObjectVisitor::ObjectCallback, this);
  // Non-moving spaces.
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->GetMarkBitmap()->Visit(visitor);
  }
  // The alloc stack.
  {
    VerifyNoFromSpaceRefsVisitor ref_visitor(this);
    for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
        it < end; ++it) {
      mirror::Object* const obj = it->AsMirrorPtr();
      if (obj != nullptr && obj->GetClass() != nullptr) {
        // TODO: need to call this only if obj is alive?
        ref_visitor(obj);
        visitor(obj);
      }
    }
  }
  // TODO: LOS. But only refs in LOS are classes.
}

// The following visitors are used to assert the to-space invariant.
class ConcurrentCopying::AssertToSpaceInvariantRefsVisitor {
 public:
  explicit AssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopying::AssertToSpaceInvariantFieldVisitor {
 public:
  explicit AssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    AssertToSpaceInvariantRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* ref ATTRIBUTE_UNUSED) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    AssertToSpaceInvariantRefsVisitor visitor(collector_);
    visitor(root->AsMirrorPtr());
  }

 private:
  ConcurrentCopying* const collector_;
};

Mathieu Chartiera07f5592016-06-16 11:44:28 -07001102class ConcurrentCopying::AssertToSpaceInvariantObjectVisitor {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001103 public:
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001104 explicit AssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001105 : collector_(collector) {}
1106 void operator()(mirror::Object* obj) const
Mathieu Chartier90443472015-07-16 20:32:27 -07001107 SHARED_REQUIRES(Locks::mutator_lock_) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001108 ObjectCallback(obj, collector_);
1109 }
1110 static void ObjectCallback(mirror::Object* obj, void *arg)
Mathieu Chartier90443472015-07-16 20:32:27 -07001111 SHARED_REQUIRES(Locks::mutator_lock_) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001112 CHECK(obj != nullptr);
1113 ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
1114 space::RegionSpace* region_space = collector->RegionSpace();
1115 CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
1116 collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001117 AssertToSpaceInvariantFieldVisitor visitor(collector);
Mathieu Chartier059ef3d2015-08-18 13:54:21 -07001118 obj->VisitReferences(visitor, visitor);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001119 }
1120
1121 private:
Mathieu Chartier97509952015-07-13 14:35:43 -07001122 ConcurrentCopying* const collector_;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001123};
1124
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001125class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001126 public:
Roland Levillain3887c462015-08-12 18:15:42 +01001127 RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
1128 bool disable_weak_ref_access)
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001129 : concurrent_copying_(concurrent_copying),
1130 disable_weak_ref_access_(disable_weak_ref_access) {
1131 }
1132
1133 virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
1134 // Note: self is not necessarily equal to thread since thread may be suspended.
1135 Thread* self = Thread::Current();
1136 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
1137 << thread->GetState() << " thread " << thread << " self " << self;
1138 // Revoke thread local mark stacks.
1139 accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
1140 if (tl_mark_stack != nullptr) {
1141 MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
1142 concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
1143 thread->SetThreadLocalMarkStack(nullptr);
1144 }
1145 // Disable weak ref access.
1146 if (disable_weak_ref_access_) {
1147 thread->SetWeakRefAccessEnabled(false);
1148 }
1149 // If thread is a running mutator, then act on behalf of the garbage collector.
1150 // See the code in ThreadList::RunCheckpoint.
Mathieu Chartier10d25082015-10-28 18:36:09 -07001151 concurrent_copying_->GetBarrier().Pass(self);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001152 }
1153
1154 private:
1155 ConcurrentCopying* const concurrent_copying_;
1156 const bool disable_weak_ref_access_;
1157};
1158
1159void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access) {
1160 Thread* self = Thread::Current();
1161 RevokeThreadLocalMarkStackCheckpoint check_point(this, disable_weak_ref_access);
1162 ThreadList* thread_list = Runtime::Current()->GetThreadList();
1163 gc_barrier_->Init(self, 0);
1164 size_t barrier_count = thread_list->RunCheckpoint(&check_point);
1165 // If there are no threads to wait for, which implies that all the checkpoint functions have
1166 // finished, then there is no need to release the mutator lock.
1167 if (barrier_count == 0) {
1168 return;
1169 }
1170 Locks::mutator_lock_->SharedUnlock(self);
1171 {
1172 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1173 gc_barrier_->Increment(self, barrier_count);
1174 }
1175 Locks::mutator_lock_->SharedLock(self);
1176}
1177
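// Illustrative sketch, not part of the collector: a minimal checkpoint Closure
// following the same contract as RevokeThreadLocalMarkStackCheckpoint above:
// do the per-thread work, then signal the barrier so the requesting thread can
// stop waiting. The per-thread work here is a hypothetical helper.
//
//   class DrainTlsCheckpoint : public Closure {
//    public:
//     explicit DrainTlsCheckpoint(ConcurrentCopying* cc) : cc_(cc) {}
//     void Run(Thread* thread) OVERRIDE {
//       // `thread` may be suspended and is not necessarily Thread::Current(),
//       // so only touch state that is safe to access on its behalf.
//       DoPerThreadWork(thread);                     // hypothetical helper
//       cc_->GetBarrier().Pass(Thread::Current());   // always signal, even on a no-op
//     }
//    private:
//     ConcurrentCopying* const cc_;
//   };
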
1178void ConcurrentCopying::RevokeThreadLocalMarkStack(Thread* thread) {
1179 Thread* self = Thread::Current();
1180 CHECK_EQ(self, thread);
1181 accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
1182 if (tl_mark_stack != nullptr) {
1183 CHECK(is_marking_);
1184 MutexLock mu(self, mark_stack_lock_);
1185 revoked_mark_stacks_.push_back(tl_mark_stack);
1186 thread->SetThreadLocalMarkStack(nullptr);
1187 }
1188}
1189
1190void ConcurrentCopying::ProcessMarkStack() {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001191 if (kVerboseMode) {
1192 LOG(INFO) << "ProcessMarkStack. ";
1193 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001194 bool empty_prev = false;
1195 while (true) {
1196 bool empty = ProcessMarkStackOnce();
1197 if (empty_prev && empty) {
1198 // Saw empty mark stack for a second time, done.
1199 break;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001200 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001201 empty_prev = empty;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001202 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001203}
1204
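// Illustrative sketch, not part of the collector: the loop above is a
// drain-to-fixed-point idiom. One empty pass is not conclusive because
// processing entries can generate new work (and mutators may push refs between
// passes); only two consecutive empty passes show the stacks have settled.
// DrainOnce() is a hypothetical stand-in for ProcessMarkStackOnce().
//
//   bool empty_prev = false;
//   while (true) {
//     bool empty = DrainOnce();        // true iff no entry was processed
//     if (empty_prev && empty) break;  // two consecutive empty drains: done
//     empty_prev = empty;
//   }
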
1205bool ConcurrentCopying::ProcessMarkStackOnce() {
1206 Thread* self = Thread::Current();
1207 CHECK(thread_running_gc_ != nullptr);
1208 CHECK(self == thread_running_gc_);
1209 CHECK(self->GetThreadLocalMarkStack() == nullptr);
1210 size_t count = 0;
1211 MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
1212 if (mark_stack_mode == kMarkStackModeThreadLocal) {
1213 // Process the thread-local mark stacks and the GC mark stack.
1214 count += ProcessThreadLocalMarkStacks(false);
1215 while (!gc_mark_stack_->IsEmpty()) {
1216 mirror::Object* to_ref = gc_mark_stack_->PopBack();
1217 ProcessMarkStackRef(to_ref);
1218 ++count;
1219 }
1220 gc_mark_stack_->Reset();
1221 } else if (mark_stack_mode == kMarkStackModeShared) {
1222 // Process the shared GC mark stack with a lock.
1223 {
1224 MutexLock mu(self, mark_stack_lock_);
1225 CHECK(revoked_mark_stacks_.empty());
1226 }
1227 while (true) {
1228 std::vector<mirror::Object*> refs;
1229 {
1230 // Copy refs with lock. Note the number of refs should be small.
1231 MutexLock mu(self, mark_stack_lock_);
1232 if (gc_mark_stack_->IsEmpty()) {
1233 break;
1234 }
1235 for (StackReference<mirror::Object>* p = gc_mark_stack_->Begin();
1236 p != gc_mark_stack_->End(); ++p) {
1237 refs.push_back(p->AsMirrorPtr());
1238 }
1239 gc_mark_stack_->Reset();
1240 }
1241 for (mirror::Object* ref : refs) {
1242 ProcessMarkStackRef(ref);
1243 ++count;
1244 }
1245 }
1246 } else {
1247 CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
1248 static_cast<uint32_t>(kMarkStackModeGcExclusive));
1249 {
1250 MutexLock mu(self, mark_stack_lock_);
1251 CHECK(revoked_mark_stacks_.empty());
1252 }
1253 // Process the GC mark stack in the exclusive mode. No need to take the lock.
1254 while (!gc_mark_stack_->IsEmpty()) {
1255 mirror::Object* to_ref = gc_mark_stack_->PopBack();
1256 ProcessMarkStackRef(to_ref);
1257 ++count;
1258 }
1259 gc_mark_stack_->Reset();
1260 }
1261
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001262 // Return true if the stack was empty.
1263 return count == 0;
1264}
1265
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001266size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access) {
1267 // Run a checkpoint to collect all thread local mark stacks and iterate over them all.
1268 RevokeThreadLocalMarkStacks(disable_weak_ref_access);
1269 size_t count = 0;
1270 std::vector<accounting::AtomicStack<mirror::Object>*> mark_stacks;
1271 {
1272 MutexLock mu(Thread::Current(), mark_stack_lock_);
1273 // Make a copy of the mark stack vector.
1274 mark_stacks = revoked_mark_stacks_;
1275 revoked_mark_stacks_.clear();
1276 }
1277 for (accounting::AtomicStack<mirror::Object>* mark_stack : mark_stacks) {
1278 for (StackReference<mirror::Object>* p = mark_stack->Begin(); p != mark_stack->End(); ++p) {
1279 mirror::Object* to_ref = p->AsMirrorPtr();
1280 ProcessMarkStackRef(to_ref);
1281 ++count;
1282 }
1283 {
1284 MutexLock mu(Thread::Current(), mark_stack_lock_);
1285 if (pooled_mark_stacks_.size() >= kMarkStackPoolSize) {
1286 // The pool has enough mark stacks. Delete this one.
1287 delete mark_stack;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001288 } else {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001289 // Otherwise, put it into the pool for later reuse.
1290 mark_stack->Reset();
1291 pooled_mark_stacks_.push_back(mark_stack);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001292 }
1293 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001294 }
1295 return count;
1296}
1297
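// Illustrative sketch, not part of the collector: the bounded pooling above is
// a size-capped free list; stacks beyond kMarkStackPoolSize are freed rather
// than retained. A hypothetical standalone form of the recycle step:
//
//   void Recycle(accounting::AtomicStack<mirror::Object>* stack) {
//     MutexLock mu(Thread::Current(), mark_stack_lock_);
//     if (pooled_mark_stacks_.size() >= kMarkStackPoolSize) {
//       delete stack;                          // pool is full: release the memory
//     } else {
//       stack->Reset();                        // drop contents, keep capacity
//       pooled_mark_stacks_.push_back(stack);  // reuse on the next revoke cycle
//     }
//   }
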
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001298inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001299 DCHECK(!region_space_->IsInFromSpace(to_ref));
1300 if (kUseBakerReadBarrier) {
1301 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
1302 << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
1303 << " is_marked=" << IsMarked(to_ref);
1304 }
Mathieu Chartierc381c362016-08-23 13:27:53 -07001305 bool add_to_live_bytes = false;
1306 if (region_space_->IsInUnevacFromSpace(to_ref)) {
1307 // Mark the bitmap only in the GC thread here so that we don't need a CAS.
1308 if (!kUseBakerReadBarrier || !region_space_bitmap_->Set(to_ref)) {
1309 // It may already be marked if we accidentally pushed the same object twice due to the racy
1310 // bitmap read in MarkUnevacFromSpaceRegion.
1311 Scan(to_ref);
1312 // Only add to the live bytes if the object was not already marked.
1313 add_to_live_bytes = true;
1314 }
1315 } else {
1316 Scan(to_ref);
1317 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001318 if (kUseBakerReadBarrier) {
1319 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
1320 << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
1321 << " is_marked=" << IsMarked(to_ref);
1322 }
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001323#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
1324 if (UNLIKELY((to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
1325 to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr &&
1326 !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())))) {
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001327 // Leave this reference gray in the queue so that GetReferent() will trigger a read barrier. We
1328 // will change it to white later in ReferenceQueue::DequeuePendingReference().
Richard Uhlere3627402016-02-02 13:36:55 -08001329 DCHECK(to_ref->AsReference()->GetPendingNext() != nullptr) << "Left unenqueued ref gray " << to_ref;
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001330 } else {
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001331 // We may occasionally leave a reference white in the queue if its referent happens to be
1332 // concurrently marked after the Scan() call above has enqueued the Reference. In that case the
1333 // IsInToSpace() check above evaluates to true, and we change the color from gray to white here
1334 // in this else block.
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001335 if (kUseBakerReadBarrier) {
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001336 bool success = to_ref->AtomicSetReadBarrierPointer</*kCasRelease*/true>(
1337 ReadBarrier::GrayPtr(),
1338 ReadBarrier::WhitePtr());
1339 DCHECK(success) << "Must succeed as we won the race.";
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001340 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001341 }
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001342#else
1343 DCHECK(!kUseBakerReadBarrier);
1344#endif
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001345
Mathieu Chartierc381c362016-08-23 13:27:53 -07001346 if (add_to_live_bytes) {
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001347 // Add to the live bytes per unevacuated from space. Note this code is always run by the
1348 // GC-running thread (no synchronization required).
1349 DCHECK(region_space_bitmap_->Test(to_ref));
1350 // Disable the read barrier in SizeOf for performance, which is safe.
1351 size_t obj_size = to_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
1352 size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
1353 region_space_->AddLiveBytes(to_ref, alloc_size);
1354 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001355 if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001356 AssertToSpaceInvariantObjectVisitor visitor(this);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001357 visitor(to_ref);
1358 }
1359}
1360
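// Worked example with hypothetical numbers: the live-byte accounting in
// ProcessMarkStackRef() rounds the object size up to the region space
// alignment, so sub-alignment fragmentation is charged to the region as live.
//
//   size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
//   // e.g. obj_size == 13 with kAlignment == 8  =>  alloc_size == 16
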
1361void ConcurrentCopying::SwitchToSharedMarkStackMode() {
1362 Thread* self = Thread::Current();
1363 CHECK(thread_running_gc_ != nullptr);
1364 CHECK_EQ(self, thread_running_gc_);
1365 CHECK(self->GetThreadLocalMarkStack() == nullptr);
1366 MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
1367 CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
1368 static_cast<uint32_t>(kMarkStackModeThreadLocal));
1369 mark_stack_mode_.StoreRelaxed(kMarkStackModeShared);
1370 CHECK(weak_ref_access_enabled_.LoadRelaxed());
1371 weak_ref_access_enabled_.StoreRelaxed(false);
1372 QuasiAtomic::ThreadFenceForConstructor();
1373 // Process the thread local mark stacks one last time after switching to the shared mark stack
1374 // mode and disabling weak ref access.
1375 ProcessThreadLocalMarkStacks(true);
1376 if (kVerboseMode) {
1377 LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access";
1378 }
1379}
1380
1381void ConcurrentCopying::SwitchToGcExclusiveMarkStackMode() {
1382 Thread* self = Thread::Current();
1383 CHECK(thread_running_gc_ != nullptr);
1384 CHECK_EQ(self, thread_running_gc_);
1385 CHECK(self->GetThreadLocalMarkStack() == nullptr);
1386 MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
1387 CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
1388 static_cast<uint32_t>(kMarkStackModeShared));
1389 mark_stack_mode_.StoreRelaxed(kMarkStackModeGcExclusive);
1390 QuasiAtomic::ThreadFenceForConstructor();
1391 if (kVerboseMode) {
1392 LOG(INFO) << "Switched to GC exclusive mark stack mode";
1393 }
1394}
1395
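// A compact summary of the mark stack mode progression driven by the two
// functions above (kMarkStackModeOff applies outside a collection):
//
//   kMarkStackModeThreadLocal    // mutators push to thread-local mark stacks
//     | SwitchToSharedMarkStackMode(), also disables weak ref access
//     v
//   kMarkStackModeShared         // mutators push to gc_mark_stack_ under mark_stack_lock_
//     | SwitchToGcExclusiveMarkStackMode()
//     v
//   kMarkStackModeGcExclusive    // only the GC-running thread touches the stack
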
1396void ConcurrentCopying::CheckEmptyMarkStack() {
1397 Thread* self = Thread::Current();
1398 CHECK(thread_running_gc_ != nullptr);
1399 CHECK_EQ(self, thread_running_gc_);
1400 CHECK(self->GetThreadLocalMarkStack() == nullptr);
1401 MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
1402 if (mark_stack_mode == kMarkStackModeThreadLocal) {
1403 // Thread-local mark stack mode.
1404 RevokeThreadLocalMarkStacks(false);
1405 MutexLock mu(Thread::Current(), mark_stack_lock_);
1406 if (!revoked_mark_stacks_.empty()) {
1407 for (accounting::AtomicStack<mirror::Object>* mark_stack : revoked_mark_stacks_) {
1408 while (!mark_stack->IsEmpty()) {
1409 mirror::Object* obj = mark_stack->PopBack();
1410 if (kUseBakerReadBarrier) {
1411 mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
1412 LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj) << " rb_ptr=" << rb_ptr
1413 << " is_marked=" << IsMarked(obj);
1414 } else {
1415 LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj)
1416 << " is_marked=" << IsMarked(obj);
1417 }
1418 }
1419 }
1420 LOG(FATAL) << "mark stack is not empty";
1421 }
1422 } else {
1423 // Shared, GC-exclusive, or off.
1424 MutexLock mu(Thread::Current(), mark_stack_lock_);
1425 CHECK(gc_mark_stack_->IsEmpty());
1426 CHECK(revoked_mark_stacks_.empty());
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001427 }
1428}
1429
1430void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
1431 TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
1432 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
Mathieu Chartier97509952015-07-13 14:35:43 -07001433 Runtime::Current()->SweepSystemWeaks(this);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001434}
1435
1436void ConcurrentCopying::Sweep(bool swap_bitmaps) {
1437 {
1438 TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
1439 accounting::ObjectStack* live_stack = heap_->GetLiveStack();
1440 if (kEnableFromSpaceAccountingCheck) {
1441 CHECK_GE(live_stack_freeze_size_, live_stack->Size());
1442 }
1443 heap_->MarkAllocStackAsLive(live_stack);
1444 live_stack->Reset();
1445 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001446 CheckEmptyMarkStack();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001447 TimingLogger::ScopedTiming split("Sweep", GetTimings());
1448 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
1449 if (space->IsContinuousMemMapAllocSpace()) {
1450 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
Mathieu Chartier763a31e2015-11-16 16:05:55 -08001451 if (space == region_space_ || immune_spaces_.ContainsSpace(space)) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001452 continue;
1453 }
1454 TimingLogger::ScopedTiming split2(
1455 alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
1456 RecordFree(alloc_space->Sweep(swap_bitmaps));
1457 }
1458 }
1459 SweepLargeObjects(swap_bitmaps);
1460}
1461
Mathieu Chartier962cd7a2016-08-16 12:15:59 -07001462void ConcurrentCopying::MarkZygoteLargeObjects() {
1463 TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
1464 Thread* const self = Thread::Current();
1465 WriterMutexLock rmu(self, *Locks::heap_bitmap_lock_);
1466 space::LargeObjectSpace* const los = heap_->GetLargeObjectsSpace();
1467 // Pick the current live bitmap (mark bitmap if swapped).
1468 accounting::LargeObjectBitmap* const live_bitmap = los->GetLiveBitmap();
1469 accounting::LargeObjectBitmap* const mark_bitmap = los->GetMarkBitmap();
1470 // Walk through all of the objects and explicitly mark the zygote ones so they don't get swept.
1471 live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(los->Begin()),
1472 reinterpret_cast<uintptr_t>(los->End()),
1473 [mark_bitmap, los, self](mirror::Object* obj)
1474 REQUIRES(Locks::heap_bitmap_lock_)
1475 SHARED_REQUIRES(Locks::mutator_lock_) {
1476 if (los->IsZygoteLargeObject(self, obj)) {
1477 mark_bitmap->Set(obj);
1478 }
1479 });
1480}
1481
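// Illustrative usage sketch, not part of the collector: VisitMarkedRange()
// walks the set bits over [begin, end) and invokes the visitor on each marked
// object, which is how the zygote large objects are pinned above. A minimal
// hypothetical caller:
//
//   accounting::LargeObjectBitmap* bitmap = los->GetLiveBitmap();
//   bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(los->Begin()),
//                            reinterpret_cast<uintptr_t>(los->End()),
//                            [](mirror::Object* obj) {
//                              // Inspect or mark `obj` here.
//                            });
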
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001482void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) {
1483 TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
1484 RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
1485}
1486
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001487void ConcurrentCopying::ReclaimPhase() {
1488 TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings());
1489 if (kVerboseMode) {
1490 LOG(INFO) << "GC ReclaimPhase";
1491 }
1492 Thread* self = Thread::Current();
1493
1494 {
1495 // Double-check that the mark stack is empty.
1496 // Note: need to set this after VerifyNoFromSpaceRef().
1497 is_asserting_to_space_invariant_ = false;
1498 QuasiAtomic::ThreadFenceForConstructor();
1499 if (kVerboseMode) {
1500 LOG(INFO) << "Issue an empty checkpoint.";
1501 }
1502 IssueEmptyCheckpoint();
1503 // Disable the check.
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001504 is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(0);
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001505 if (kUseBakerReadBarrier) {
1506 updated_all_immune_objects_.StoreSequentiallyConsistent(false);
1507 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001508 CheckEmptyMarkStack();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001509 }
1510
1511 {
1512 // Record freed objects.
1513 TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
1514 // Don't include thread-locals that are in the to-space.
1515 uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
1516 uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
1517 uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
1518 uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
1519 uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
Mathieu Chartiercca44a02016-08-17 10:07:29 -07001520 cumulative_bytes_moved_.FetchAndAddRelaxed(to_bytes);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001521 uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
Mathieu Chartiercca44a02016-08-17 10:07:29 -07001522 cumulative_objects_moved_.FetchAndAddRelaxed(to_objects);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001523 if (kEnableFromSpaceAccountingCheck) {
1524 CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
1525 CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
1526 }
1527 CHECK_LE(to_objects, from_objects);
1528 CHECK_LE(to_bytes, from_bytes);
1529 int64_t freed_bytes = from_bytes - to_bytes;
1530 int64_t freed_objects = from_objects - to_objects;
1531 if (kVerboseMode) {
1532 LOG(INFO) << "RecordFree:"
1533 << " from_bytes=" << from_bytes << " from_objects=" << from_objects
1534 << " unevac_from_bytes=" << unevac_from_bytes << " unevac_from_objects=" << unevac_from_objects
1535 << " to_bytes=" << to_bytes << " to_objects=" << to_objects
1536 << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects
1537 << " from_space size=" << region_space_->FromSpaceSize()
1538 << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
1539 << " to_space size=" << region_space_->ToSpaceSize();
1540 LOG(INFO) << "(before) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
1541 }
1542 RecordFree(ObjectBytePair(freed_objects, freed_bytes));
1543 if (kVerboseMode) {
1544 LOG(INFO) << "(after) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
1545 }
1546 }
1547
1548 {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001549 TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
1550 region_space_->ClearFromSpace();
1551 }
1552
1553 {
1554 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001555 Sweep(false);
1556 SwapBitmaps();
1557 heap_->UnBindBitmaps();
1558
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001559 // Delete the region bitmap.
1560 DCHECK(region_space_bitmap_ != nullptr);
1561 delete region_space_bitmap_;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001562 region_space_bitmap_ = nullptr;
1563 }
1564
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001565 CheckEmptyMarkStack();
1566
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001567 if (kVerboseMode) {
1568 LOG(INFO) << "GC end of ReclaimPhase";
1569 }
1570}
1571
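// Worked example with hypothetical numbers for the accounting above:
//
//   freed_bytes   = from_bytes   - to_bytes
//   freed_objects = from_objects - to_objects
//
// e.g. with from_bytes == 64 MB evacuated and to_bytes == 24 MB of surviving
// copies, freed_bytes == 40 MB. The CHECK_LEs above guarantee both differences
// are non-negative.
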
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001572// Assert the to-space invariant.
1573void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
1574 mirror::Object* ref) {
1575 CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
1576 if (is_asserting_to_space_invariant_) {
1577 if (region_space_->IsInToSpace(ref)) {
1578 // OK.
1579 return;
1580 } else if (region_space_->IsInUnevacFromSpace(ref)) {
Mathieu Chartierc381c362016-08-23 13:27:53 -07001581 CHECK(IsMarkedInUnevacFromSpace(ref)) << ref;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001582 } else if (region_space_->IsInFromSpace(ref)) {
1583 // Not OK. Do extra logging.
1584 if (obj != nullptr) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001585 LogFromSpaceRefHolder(obj, offset);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001586 }
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001587 ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001588 CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
1589 } else {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001590 AssertToSpaceInvariantInNonMovingSpace(obj, ref);
1591 }
1592 }
1593}
1594
1595class RootPrinter {
1596 public:
1597 RootPrinter() { }
1598
1599 template <class MirrorType>
1600 ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
Mathieu Chartier90443472015-07-16 20:32:27 -07001601 SHARED_REQUIRES(Locks::mutator_lock_) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001602 if (!root->IsNull()) {
1603 VisitRoot(root);
1604 }
1605 }
1606
1607 template <class MirrorType>
1608 void VisitRoot(mirror::Object** root)
Mathieu Chartier90443472015-07-16 20:32:27 -07001609 SHARED_REQUIRES(Locks::mutator_lock_) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001610 LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << *root;
1611 }
1612
1613 template <class MirrorType>
1614 void VisitRoot(mirror::CompressedReference<MirrorType>* root)
Mathieu Chartier90443472015-07-16 20:32:27 -07001615 SHARED_REQUIRES(Locks::mutator_lock_) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001616 LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << root->AsMirrorPtr();
1617 }
1618};
1619
1620void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source,
1621 mirror::Object* ref) {
1622 CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
1623 if (is_asserting_to_space_invariant_) {
1624 if (region_space_->IsInToSpace(ref)) {
1625 // OK.
1626 return;
1627 } else if (region_space_->IsInUnevacFromSpace(ref)) {
Mathieu Chartierc381c362016-08-23 13:27:53 -07001628 CHECK(IsMarkedInUnevacFromSpace(ref)) << ref;
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001629 } else if (region_space_->IsInFromSpace(ref)) {
1630 // Not OK. Do extra logging.
1631 if (gc_root_source == nullptr) {
1632 // No info.
1633 } else if (gc_root_source->HasArtField()) {
1634 ArtField* field = gc_root_source->GetArtField();
1635 LOG(INTERNAL_FATAL) << "gc root in field " << field << " " << PrettyField(field);
1636 RootPrinter root_printer;
1637 field->VisitRoots(root_printer);
1638 } else if (gc_root_source->HasArtMethod()) {
1639 ArtMethod* method = gc_root_source->GetArtMethod();
1640 LOG(INTERNAL_FATAL) << "gc root in method " << method << " " << PrettyMethod(method);
1641 RootPrinter root_printer;
Andreas Gampe542451c2016-07-26 09:02:02 -07001642 method->VisitRoots(root_printer, kRuntimePointerSize);
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001643 }
1644 ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
1645 region_space_->DumpNonFreeRegions(LOG(INTERNAL_FATAL));
1646 PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
1647 MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
1648 CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
1649 } else {
1650 AssertToSpaceInvariantInNonMovingSpace(nullptr, ref);
1651 }
1652 }
1653}
1654
1655void ConcurrentCopying::LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) {
1656 if (kUseBakerReadBarrier) {
1657 LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj)
1658 << " holder rb_ptr=" << obj->GetReadBarrierPointer();
1659 } else {
1660 LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj);
1661 }
1662 if (region_space_->IsInFromSpace(obj)) {
1663 LOG(INFO) << "holder is in the from-space.";
1664 } else if (region_space_->IsInToSpace(obj)) {
1665 LOG(INFO) << "holder is in the to-space.";
1666 } else if (region_space_->IsInUnevacFromSpace(obj)) {
1667 LOG(INFO) << "holder is in the unevac from-space.";
Mathieu Chartierc381c362016-08-23 13:27:53 -07001668 if (IsMarkedInUnevacFromSpace(obj)) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001669 LOG(INFO) << "holder is marked in the region space bitmap.";
1670 } else {
1671 LOG(INFO) << "holder is not marked in the region space bitmap.";
1672 }
1673 } else {
1674 // In a non-moving space.
Mathieu Chartier763a31e2015-11-16 16:05:55 -08001675 if (immune_spaces_.ContainsObject(obj)) {
1676 LOG(INFO) << "holder is in an immune image or the zygote space.";
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001677 } else {
Mathieu Chartier763a31e2015-11-16 16:05:55 -08001678 LOG(INFO) << "holder is in a non-immune, non-moving (or main) space.";
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001679 accounting::ContinuousSpaceBitmap* mark_bitmap =
1680 heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
1681 accounting::LargeObjectBitmap* los_bitmap =
1682 heap_mark_bitmap_->GetLargeObjectBitmap(obj);
1683 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1684 bool is_los = mark_bitmap == nullptr;
1685 if (!is_los && mark_bitmap->Test(obj)) {
1686 LOG(INFO) << "holder is marked in the mark bit map.";
1687 } else if (is_los && los_bitmap->Test(obj)) {
1688 LOG(INFO) << "holder is marked in the los bit map.";
1689 } else {
1690 // If ref is on the allocation stack, then it is considered
1691 // marked/alive (but not necessarily on the live stack).
1692 if (IsOnAllocStack(obj)) {
1693 LOG(INFO) << "holder is on the alloc stack.";
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001694 } else {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001695 LOG(INFO) << "holder is not marked or on the alloc stack.";
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001696 }
1697 }
1698 }
1699 }
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001700 LOG(INFO) << "offset=" << offset.SizeValue();
1701}
1702
1703void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj,
1704 mirror::Object* ref) {
1705 // In a non-moving space. Check that the ref is marked.
Mathieu Chartier763a31e2015-11-16 16:05:55 -08001706 if (immune_spaces_.ContainsObject(ref)) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001707 if (kUseBakerReadBarrier) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001708 // An immune object may not be gray if this is called from the GC thread.
1709 if (Thread::Current() == thread_running_gc_ && !gc_grays_immune_objects_) {
1710 return;
1711 }
1712 bool updated_all_immune_objects = updated_all_immune_objects_.LoadSequentiallyConsistent();
1713 CHECK(updated_all_immune_objects || ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001714 << "Unmarked immune space ref. obj=" << obj << " rb_ptr="
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001715 << (obj != nullptr ? obj->GetReadBarrierPointer() : nullptr)
1716 << " ref=" << ref << " ref rb_ptr=" << ref->GetReadBarrierPointer()
1717 << " updated_all_immune_objects=" << updated_all_immune_objects;
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001718 }
1719 } else {
1720 accounting::ContinuousSpaceBitmap* mark_bitmap =
1721 heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
1722 accounting::LargeObjectBitmap* los_bitmap =
1723 heap_mark_bitmap_->GetLargeObjectBitmap(ref);
1724 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1725 bool is_los = mark_bitmap == nullptr;
1726 if ((!is_los && mark_bitmap->Test(ref)) ||
1727 (is_los && los_bitmap->Test(ref))) {
1728 // OK.
1729 } else {
1730 // If ref is on the allocation stack, then it may not be
1731 // marked live, but considered marked/alive (but not
1732 // necessarily on the live stack).
1733 CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. "
1734 << "obj=" << obj << " ref=" << ref;
1735 }
1736 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001737}
1738
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001739// Used to scan ref fields of an object.
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001740class ConcurrentCopying::RefFieldsVisitor {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001741 public:
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001742 explicit RefFieldsVisitor(ConcurrentCopying* collector)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001743 : collector_(collector) {}
1744
1745 void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
Mathieu Chartier90443472015-07-16 20:32:27 -07001746 const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_)
1747 SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001748 collector_->Process(obj, offset);
1749 }
1750
1751 void operator()(mirror::Class* klass, mirror::Reference* ref) const
Mathieu Chartier90443472015-07-16 20:32:27 -07001752 SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001753 CHECK(klass->IsTypeOfReferenceClass());
1754 collector_->DelayReferenceReferent(klass, ref);
1755 }
1756
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001757 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001758 ALWAYS_INLINE
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001759 SHARED_REQUIRES(Locks::mutator_lock_) {
1760 if (!root->IsNull()) {
1761 VisitRoot(root);
1762 }
1763 }
1764
1765 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001766 ALWAYS_INLINE
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001767 SHARED_REQUIRES(Locks::mutator_lock_) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001768 collector_->MarkRoot</*kGrayImmuneObject*/false>(root);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001769 }
1770
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001771 private:
1772 ConcurrentCopying* const collector_;
1773};
1774
1775// Scan ref fields of an object.
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001776inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
Mathieu Chartierd6636d32016-07-28 11:02:38 -07001777 if (kDisallowReadBarrierDuringScan) {
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07001778 // Avoid all read barriers while visiting references to help performance.
1779 Thread::Current()->ModifyDebugDisallowReadBarrier(1);
1780 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001781 DCHECK(!region_space_->IsInFromSpace(to_ref));
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001782 DCHECK_EQ(Thread::Current(), thread_running_gc_);
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001783 RefFieldsVisitor visitor(this);
Hiroshi Yamauchi5496f692016-02-17 13:29:59 -08001784 // Disable the read barrier for a performance reason.
1785 to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
1786 visitor, visitor);
Mathieu Chartierd6636d32016-07-28 11:02:38 -07001787 if (kDisallowReadBarrierDuringScan) {
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07001788 Thread::Current()->ModifyDebugDisallowReadBarrier(-1);
1789 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001790}
1791
1792// Process a field.
1793inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001794 DCHECK_EQ(Thread::Current(), thread_running_gc_);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001795 mirror::Object* ref = obj->GetFieldObject<
1796 mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
Mathieu Chartierc381c362016-08-23 13:27:53 -07001797 mirror::Object* to_ref = Mark</*kGrayImmuneObject*/false, /*kFromGCThread*/true>(ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001798 if (to_ref == ref) {
1799 return;
1800 }
1801 // This may fail if the mutator writes to the field at the same time. But it's ok.
1802 mirror::Object* expected_ref = ref;
1803 mirror::Object* new_ref = to_ref;
1804 do {
1805 if (expected_ref !=
1806 obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) {
1807 // It was updated by the mutator.
1808 break;
1809 }
Hiroshi Yamauchifed3e2f2015-10-20 11:11:56 -07001810 } while (!obj->CasFieldWeakRelaxedObjectWithoutWriteBarrier<
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001811 false, false, kVerifyNone>(offset, expected_ref, new_ref));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001812}
1813
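// Illustrative sketch, not part of the collector: Process() above is the
// general "re-read, then weak CAS" update loop for racing against mutator
// stores. If the mutator wins, the GC-side update is abandoned; the mutator's
// value was itself obtained through a read barrier, so the slot already holds
// a to-space ref. Load() and WeakCas() are hypothetical stand-ins.
//
//   do {
//     if (Load(slot) != expected) {
//       break;                    // the mutator already updated the slot
//     }
//   } while (!WeakCas(slot, expected, desired));  // weak CAS may fail spuriously
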
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001814// Process some roots.
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001815inline void ConcurrentCopying::VisitRoots(
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001816 mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
1817 for (size_t i = 0; i < count; ++i) {
1818 mirror::Object** root = roots[i];
1819 mirror::Object* ref = *root;
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001820 mirror::Object* to_ref = Mark(ref);
1821 if (to_ref == ref) {
Mathieu Chartier4809d0a2015-04-07 10:39:04 -07001822 continue;
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001823 }
1824 Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
1825 mirror::Object* expected_ref = ref;
1826 mirror::Object* new_ref = to_ref;
1827 do {
1828 if (expected_ref != addr->LoadRelaxed()) {
1829 // It was updated by the mutator.
1830 break;
1831 }
Hiroshi Yamauchifed3e2f2015-10-20 11:11:56 -07001832 } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001833 }
1834}
1835
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001836template<bool kGrayImmuneObject>
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001837inline void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) {
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001838 DCHECK(!root->IsNull());
1839 mirror::Object* const ref = root->AsMirrorPtr();
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001840 mirror::Object* to_ref = Mark<kGrayImmuneObject>(ref);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001841 if (to_ref != ref) {
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001842 auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
1843 auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
1844 auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001845 // If the cas fails, then it was updated by the mutator.
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001846 do {
1847 if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
1848 // It was updated by the mutator.
1849 break;
1850 }
Hiroshi Yamauchifed3e2f2015-10-20 11:11:56 -07001851 } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001852 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001853}
1854
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001855inline void ConcurrentCopying::VisitRoots(
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001856 mirror::CompressedReference<mirror::Object>** roots, size_t count,
1857 const RootInfo& info ATTRIBUTE_UNUSED) {
1858 for (size_t i = 0; i < count; ++i) {
1859 mirror::CompressedReference<mirror::Object>* const root = roots[i];
1860 if (!root->IsNull()) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001861 // kGrayImmuneObject is true because this is used for the thread flip.
1862 MarkRoot</*kGrayImmuneObject*/true>(root);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001863 }
1864 }
1865}
1866
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001867 // Temporarily set gc_grays_immune_objects_ to true in a scope if the current thread is the GC thread.
1868class ConcurrentCopying::ScopedGcGraysImmuneObjects {
1869 public:
1870 explicit ScopedGcGraysImmuneObjects(ConcurrentCopying* collector)
1871 : collector_(collector), enabled_(false) {
1872 if (kUseBakerReadBarrier &&
1873 collector_->thread_running_gc_ == Thread::Current() &&
1874 !collector_->gc_grays_immune_objects_) {
1875 collector_->gc_grays_immune_objects_ = true;
1876 enabled_ = true;
1877 }
1878 }
1879
1880 ~ScopedGcGraysImmuneObjects() {
1881 if (kUseBakerReadBarrier &&
1882 collector_->thread_running_gc_ == Thread::Current() &&
1883 enabled_) {
1884 DCHECK(collector_->gc_grays_immune_objects_);
1885 collector_->gc_grays_immune_objects_ = false;
1886 }
1887 }
1888
1889 private:
1890 ConcurrentCopying* const collector_;
1891 bool enabled_;
1892};
1893
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001894 // Fill the given memory block with a dummy object. Used to fill in
1895 // copies of objects that were lost in races.
1896void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001897 // GC doesn't gray immune objects while scanning immune objects. But we need to trigger the read
1898 // barriers here because we need the updated reference to the int array class, etc. Temporarily set
1899 // gc_grays_immune_objects_ to true so that we won't cause a DCHECK failure in MarkImmuneSpace().
1900 ScopedGcGraysImmuneObjects scoped_gc_gray_immune_objects(this);
Roland Levillain14d90572015-07-16 10:52:26 +01001901 CHECK_ALIGNED(byte_size, kObjectAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001902 memset(dummy_obj, 0, byte_size);
Mathieu Chartierd6636d32016-07-28 11:02:38 -07001903 // Avoid going through the read barrier since kDisallowReadBarrierDuringScan may be enabled.
1904 // Explicitly mark to make sure to get an object in the to-space.
1905 mirror::Class* int_array_class = down_cast<mirror::Class*>(
1906 Mark(mirror::IntArray::GetArrayClass<kWithoutReadBarrier>()));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001907 CHECK(int_array_class != nullptr);
1908 AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07001909 size_t component_size = int_array_class->GetComponentSize<kWithoutReadBarrier>();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001910 CHECK_EQ(component_size, sizeof(int32_t));
1911 size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
1912 if (data_offset > byte_size) {
1913 // An int array is too big. Use java.lang.Object.
1914 mirror::Class* java_lang_Object = WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object);
1915 AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object);
Mathieu Chartierd6636d32016-07-28 11:02:38 -07001916 CHECK_EQ(byte_size, (java_lang_Object->GetObjectSize<kVerifyNone, kWithoutReadBarrier>()));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001917 dummy_obj->SetClass(java_lang_Object);
Mathieu Chartierd6636d32016-07-28 11:02:38 -07001918 CHECK_EQ(byte_size, (dummy_obj->SizeOf<kVerifyNone, kWithoutReadBarrier>()));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001919 } else {
1920 // Use an int array.
1921 dummy_obj->SetClass(int_array_class);
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07001922 CHECK((dummy_obj->IsArrayInstance<kVerifyNone, kWithoutReadBarrier>()));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001923 int32_t length = (byte_size - data_offset) / component_size;
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07001924 mirror::Array* dummy_arr = dummy_obj->AsArray<kVerifyNone, kWithoutReadBarrier>();
1925 dummy_arr->SetLength(length);
1926 CHECK_EQ(dummy_arr->GetLength(), length)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001927 << "byte_size=" << byte_size << " length=" << length
1928 << " component_size=" << component_size << " data_offset=" << data_offset;
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07001929 CHECK_EQ(byte_size, (dummy_obj->SizeOf<kVerifyNone, kWithoutReadBarrier>()))
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001930 << "byte_size=" << byte_size << " length=" << length
1931 << " component_size=" << component_size << " data_offset=" << data_offset;
1932 }
1933}
1934
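// Worked example with hypothetical numbers for the int-array case above:
// length == (byte_size - data_offset) / component_size. Assuming a 12-byte
// int[] header (data_offset == 12) and component_size == 4, a 64-byte block
// gives length == (64 - 12) / 4 == 13, and SizeOf() == 12 + 13 * 4 == 64
// == byte_size, so the filler exactly covers the block.
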
1935 // Reuse the memory blocks that were copies of objects lost in races.
1936mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
1937 // Try to reuse the blocks that were unused due to CAS failures.
Roland Levillain14d90572015-07-16 10:52:26 +01001938 CHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001939 Thread* self = Thread::Current();
1940 size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
Mathieu Chartierd6636d32016-07-28 11:02:38 -07001941 size_t byte_size;
1942 uint8_t* addr;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001943 {
Mathieu Chartierd6636d32016-07-28 11:02:38 -07001944 MutexLock mu(self, skipped_blocks_lock_);
1945 auto it = skipped_blocks_map_.lower_bound(alloc_size);
1946 if (it == skipped_blocks_map_.end()) {
1947 // Not found.
1948 return nullptr;
1949 }
1950 byte_size = it->first;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001951 CHECK_GE(byte_size, alloc_size);
1952 if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
1953 // If remainder would be too small for a dummy object, retry with a larger request size.
1954 it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
1955 if (it == skipped_blocks_map_.end()) {
1956 // Not found.
1957 return nullptr;
1958 }
Roland Levillain14d90572015-07-16 10:52:26 +01001959 CHECK_ALIGNED(it->first - alloc_size, space::RegionSpace::kAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001960 CHECK_GE(it->first - alloc_size, min_object_size)
1961 << "byte_size=" << byte_size << " it->first=" << it->first << " alloc_size=" << alloc_size;
1962 }
Mathieu Chartierd6636d32016-07-28 11:02:38 -07001963 // Found a block.
1964 CHECK(it != skipped_blocks_map_.end());
1965 byte_size = it->first;
1966 addr = it->second;
1967 CHECK_GE(byte_size, alloc_size);
1968 CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
1969 CHECK_ALIGNED(byte_size, space::RegionSpace::kAlignment);
1970 if (kVerboseMode) {
1971 LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
1972 }
1973 skipped_blocks_map_.erase(it);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001974 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001975 memset(addr, 0, byte_size);
1976 if (byte_size > alloc_size) {
1977 // Return the remainder to the map.
Roland Levillain14d90572015-07-16 10:52:26 +01001978 CHECK_ALIGNED(byte_size - alloc_size, space::RegionSpace::kAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001979 CHECK_GE(byte_size - alloc_size, min_object_size);
Mathieu Chartierd6636d32016-07-28 11:02:38 -07001980 // FillWithDummyObject may mark an object; avoid holding skipped_blocks_lock_ to prevent a lock
1981 // order violation and possible deadlock. The deadlock case is recursive:
1982 // FillWithDummyObject -> IntArray::GetArrayClass -> Mark -> Copy -> AllocateInSkippedBlock.
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001983 FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
1984 byte_size - alloc_size);
1985 CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
Mathieu Chartierd6636d32016-07-28 11:02:38 -07001986 {
1987 MutexLock mu(self, skipped_blocks_lock_);
1988 skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
1989 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001990 }
1991 return reinterpret_cast<mirror::Object*>(addr);
1992}
1993
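// Illustrative sketch, not part of the collector: skipped_blocks_map_ behaves
// as a best-fit free list keyed by block size; lower_bound() finds the
// smallest block that fits and a usable tail is split off and re-inserted, as
// above. A hypothetical standalone form (minus the min-object-size check):
//
//   std::multimap<size_t, uint8_t*> free_blocks;      // block size -> address
//   uint8_t* TakeBlock(size_t alloc_size) {
//     auto it = free_blocks.lower_bound(alloc_size);  // smallest block >= alloc_size
//     if (it == free_blocks.end()) {
//       return nullptr;
//     }
//     uint8_t* addr = it->second;
//     size_t remainder = it->first - alloc_size;
//     free_blocks.erase(it);
//     if (remainder != 0) {
//       free_blocks.insert(std::make_pair(remainder, addr + alloc_size));
//     }
//     return addr;
//   }
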
1994mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) {
1995 DCHECK(region_space_->IsInFromSpace(from_ref));
1996 // No read barrier to avoid nested RB that might violate the to-space
1997 // invariant. Note that from_ref is a from space ref so the SizeOf()
1998 // call will access the from-space meta objects, but it's ok and necessary.
1999 size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
2000 size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
2001 size_t region_space_bytes_allocated = 0U;
2002 size_t non_moving_space_bytes_allocated = 0U;
2003 size_t bytes_allocated = 0U;
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07002004 size_t dummy;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002005 mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07002006 region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002007 bytes_allocated = region_space_bytes_allocated;
2008 if (to_ref != nullptr) {
2009 DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
2010 }
2011 bool fall_back_to_non_moving = false;
2012 if (UNLIKELY(to_ref == nullptr)) {
2013 // Failed to allocate in the region space. Try the skipped blocks.
2014 to_ref = AllocateInSkippedBlock(region_space_alloc_size);
2015 if (to_ref != nullptr) {
2016 // Succeeded to allocate in a skipped block.
2017 if (heap_->use_tlab_) {
2018 // This is necessary for the TLAB case as the allocation is not accounted for in the space.
2019 region_space_->RecordAlloc(to_ref);
2020 }
2021 bytes_allocated = region_space_alloc_size;
2022 } else {
2023 // Fall back to the non-moving space.
2024 fall_back_to_non_moving = true;
2025 if (kVerboseMode) {
2026 LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
2027 << to_space_bytes_skipped_.LoadSequentiallyConsistent()
2028 << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent();
2029 }
2031 to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07002032 &non_moving_space_bytes_allocated, nullptr, &dummy);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002033 CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed";
2034 bytes_allocated = non_moving_space_bytes_allocated;
2035 // Mark it in the mark bitmap.
2036 accounting::ContinuousSpaceBitmap* mark_bitmap =
2037 heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
2038 CHECK(mark_bitmap != nullptr);
2039 CHECK(!mark_bitmap->AtomicTestAndSet(to_ref));
2040 }
2041 }
2042 DCHECK(to_ref != nullptr);
2043
2044 // Attempt to install the forward pointer. This is in a loop as the
2045 // lock word atomic write can fail.
2046 while (true) {
2047 // Copy the object. TODO: copy only the lock word from the second iteration on?
2048 memcpy(to_ref, from_ref, obj_size);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002049
2050 LockWord old_lock_word = to_ref->GetLockWord(false);
2051
2052 if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
2053 // Lost the race. Another thread (either GC or mutator) stored
2054 // the forwarding pointer first. Make the lost copy (to_ref)
2055 // look like a valid but dead (dummy) object and keep it for
2056 // future reuse.
2057 FillWithDummyObject(to_ref, bytes_allocated);
2058 if (!fall_back_to_non_moving) {
2059 DCHECK(region_space_->IsInToSpace(to_ref));
2060 if (bytes_allocated > space::RegionSpace::kRegionSize) {
2061 // Free the large alloc.
2062 region_space_->FreeLarge(to_ref, bytes_allocated);
2063 } else {
2064 // Record the lost copy for later reuse.
2065 heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated);
2066 to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated);
2067 to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1);
2068 MutexLock mu(Thread::Current(), skipped_blocks_lock_);
2069 skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
2070 reinterpret_cast<uint8_t*>(to_ref)));
2071 }
2072 } else {
2073 DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
2074 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
2075 // Free the non-moving-space chunk.
2076 accounting::ContinuousSpaceBitmap* mark_bitmap =
2077 heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
2078 CHECK(mark_bitmap != nullptr);
2079 CHECK(mark_bitmap->Clear(to_ref));
2080 heap_->non_moving_space_->Free(Thread::Current(), to_ref);
2081 }
2082
2083 // Get the winner's forward ptr.
2084 mirror::Object* lost_fwd_ptr = to_ref;
2085 to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress());
2086 CHECK(to_ref != nullptr);
2087 CHECK_NE(to_ref, lost_fwd_ptr);
2088 CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref));
2089 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
2090 return to_ref;
2091 }
2092
Hiroshi Yamauchi60f63f52015-04-23 16:12:40 -07002093 // Set the gray ptr.
2094 if (kUseBakerReadBarrier) {
2095 to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr());
2096 }
2097
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002098 LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));
2099
2100 // Try to atomically write the fwd ptr.
2101 bool success = from_ref->CasLockWordWeakSequentiallyConsistent(old_lock_word, new_lock_word);
2102 if (LIKELY(success)) {
2103 // The CAS succeeded.
2104 objects_moved_.FetchAndAddSequentiallyConsistent(1);
2105 bytes_moved_.FetchAndAddSequentiallyConsistent(region_space_alloc_size);
2106 if (LIKELY(!fall_back_to_non_moving)) {
2107 DCHECK(region_space_->IsInToSpace(to_ref));
2108 } else {
2109 DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
2110 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
2111 }
2112 if (kUseBakerReadBarrier) {
2113 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
2114 }
2115 DCHECK(GetFwdPtr(from_ref) == to_ref);
2116 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07002117 PushOntoMarkStack(to_ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002118 return to_ref;
2119 } else {
2120 // The CAS failed. It may have lost the race or may have failed
2121 // due to monitor/hashcode ops. Either way, retry.
2122 }
2123 }
2124}
2125
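// Illustrative sketch, not part of the collector: the copy protocol above lets
// both the GC thread and mutators (via read barrier slow paths) race to copy
// the same object; the lock word CAS picks exactly one winner, and the loser
// recycles its speculative copy. Condensed from the loop above:
//
//   while (true) {
//     memcpy(to_ref, from_ref, obj_size);              // speculative copy
//     LockWord old_word = from_ref->GetLockWord(false);
//     if (old_word.GetState() == LockWord::kForwardingAddress) {
//       FillWithDummyObject(to_ref, bytes_allocated);  // lost: recycle our copy
//       return reinterpret_cast<mirror::Object*>(old_word.ForwardingAddress());
//     }
//     LockWord fwd = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));
//     if (from_ref->CasLockWordWeakSequentiallyConsistent(old_word, fwd)) {
//       return to_ref;                                 // won: to_ref is canonical
//     }
//     // CAS failed: lost a race or a concurrent monitor/hash-code update; retry.
//   }
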
2126mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
2127 DCHECK(from_ref != nullptr);
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08002128 space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
2129 if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002130 // It's already marked.
2131 return from_ref;
2132 }
2133 mirror::Object* to_ref;
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08002134 if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002135 to_ref = GetFwdPtr(from_ref);
2136 DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) ||
2137 heap_->non_moving_space_->HasAddress(to_ref))
2138 << "from_ref=" << from_ref << " to_ref=" << to_ref;
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08002139 } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
Mathieu Chartierc381c362016-08-23 13:27:53 -07002140 if (IsMarkedInUnevacFromSpace(from_ref)) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002141 to_ref = from_ref;
2142 } else {
2143 to_ref = nullptr;
2144 }
2145 } else {
2146 // from_ref is in a non-moving space.
Mathieu Chartier763a31e2015-11-16 16:05:55 -08002147 if (immune_spaces_.ContainsObject(from_ref)) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002148 // An immune object is alive.
2149 to_ref = from_ref;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002150 } else {
2151 // Non-immune non-moving space. Use the mark bitmap.
2152 accounting::ContinuousSpaceBitmap* mark_bitmap =
2153 heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
2154 accounting::LargeObjectBitmap* los_bitmap =
2155 heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
2156 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
2157 bool is_los = mark_bitmap == nullptr;
2158 if (!is_los && mark_bitmap->Test(from_ref)) {
2159 // Already marked.
2160 to_ref = from_ref;
2161 } else if (is_los && los_bitmap->Test(from_ref)) {
2162 // Already marked in LOS.
2163 to_ref = from_ref;
2164 } else {
2165 // Not marked.
2166 if (IsOnAllocStack(from_ref)) {
2167 // If on the allocation stack, it's considered marked.
2168 to_ref = from_ref;
2169 } else {
2170 // Not marked.
2171 to_ref = nullptr;
2172 }
2173 }
2174 }
2175 }
2176 return to_ref;
2177}
2178
2179bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
2180 QuasiAtomic::ThreadFenceAcquire();
2181 accounting::ObjectStack* alloc_stack = GetAllocationStack();
Mathieu Chartiercb535da2015-01-23 13:50:03 -08002182 return alloc_stack->Contains(ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002183}
2184
mirror::Object* ConcurrentCopying::MarkNonMoving(mirror::Object* ref) {
  // ref is in a non-moving space (from_ref == to_ref).
  DCHECK(!region_space_->HasAddress(ref)) << ref;
  DCHECK(!immune_spaces_.ContainsObject(ref));
  // Use the mark bitmap.
  accounting::ContinuousSpaceBitmap* mark_bitmap =
      heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
  accounting::LargeObjectBitmap* los_bitmap =
      heap_mark_bitmap_->GetLargeObjectBitmap(ref);
  CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
  bool is_los = mark_bitmap == nullptr;
  if (!is_los && mark_bitmap->Test(ref)) {
    // Already marked.
    if (kUseBakerReadBarrier) {
      DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
             ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
    }
  } else if (is_los && los_bitmap->Test(ref)) {
    // Already marked in LOS.
    if (kUseBakerReadBarrier) {
      DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
             ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
    }
  } else {
    // Not marked.
    if (IsOnAllocStack(ref)) {
      // If it's on the allocation stack, it's considered marked. Keep it white.
      // Objects on the allocation stack need not be marked.
      if (!is_los) {
        DCHECK(!mark_bitmap->Test(ref));
      } else {
        DCHECK(!los_bitmap->Test(ref));
      }
      if (kUseBakerReadBarrier) {
        DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr());
      }
    } else {
      // For the baker-style RB, we need to handle 'false-gray' cases. See the
      // kRegionTypeUnevacFromSpace-case comment in Mark().
      if (kUseBakerReadBarrier) {
        // Test the bitmap first to reduce the chance of false gray cases.
        if ((!is_los && mark_bitmap->Test(ref)) ||
            (is_los && los_bitmap->Test(ref))) {
          return ref;
        }
      }
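      // The marking protocol below: first CAS the read barrier state white -> gray,
      // then atomically test-and-set the mark bitmap bit. If the bitmap bit turns out
      // to have been set already, another thread won the marking race and our gray
      // transition was unnecessary (a "false gray"); the object is recorded on the
      // false-gray stack so it can be changed back to white once marking is done.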
      // Not marked or on the allocation stack. Try to mark it.
      // This may or may not succeed, which is ok.
      bool cas_success = false;
      if (kUseBakerReadBarrier) {
        cas_success = ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(),
                                                       ReadBarrier::GrayPtr());
      }
      if (!is_los && mark_bitmap->AtomicTestAndSet(ref)) {
        // Already marked.
        if (kUseBakerReadBarrier && cas_success &&
            ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
          PushOntoFalseGrayStack(ref);
        }
      } else if (is_los && los_bitmap->AtomicTestAndSet(ref)) {
        // Already marked in LOS.
        if (kUseBakerReadBarrier && cas_success &&
            ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
          PushOntoFalseGrayStack(ref);
        }
      } else {
        // Newly marked.
        if (kUseBakerReadBarrier) {
          DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
        }
        PushOntoMarkStack(ref);
      }
    }
  }
  return ref;
}

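// Post-collection cleanup: checks that all pooled mark stacks were returned, clears the
// skipped-block map and the mark bitmaps, optionally filters mod-union table cards for
// the immune spaces, clears the mark bits recorded in rb_mark_bit_stack_, and folds
// this cycle's read-barrier slow-path measurements into the cumulative totals.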
void ConcurrentCopying::FinishPhase() {
  Thread* const self = Thread::Current();
  {
    MutexLock mu(self, mark_stack_lock_);
    CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
  }
  region_space_ = nullptr;
  {
    MutexLock mu(self, skipped_blocks_lock_);
    skipped_blocks_map_.clear();
  }
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    {
      WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
      heap_->ClearMarkedObjects();
    }
    if (kUseBakerReadBarrier && kFilterModUnionCards) {
      TimingLogger::ScopedTiming split("FilterModUnionCards", GetTimings());
      ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
      gc::Heap* const heap = Runtime::Current()->GetHeap();
      for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
        DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
        accounting::ModUnionTable* table = heap->FindModUnionTableFromSpace(space);
        // Filter out cards that don't need to be set.
        if (table != nullptr) {
          table->FilterCards();
        }
      }
    }
    if (kUseBakerReadBarrier) {
      TimingLogger::ScopedTiming split("EmptyRBMarkBitStack", GetTimings());
      DCHECK(rb_mark_bit_stack_.get() != nullptr);
      const auto* limit = rb_mark_bit_stack_->End();
      for (StackReference<mirror::Object>* it = rb_mark_bit_stack_->Begin(); it != limit; ++it) {
        CHECK(it->AsMirrorPtr()->AtomicSetMarkBit(1, 0));
      }
      rb_mark_bit_stack_->Reset();
    }
  }
  if (measure_read_barrier_slow_path_) {
    MutexLock mu(self, rb_slow_path_histogram_lock_);
    rb_slow_path_time_histogram_.AdjustAndAddValue(rb_slow_path_ns_.LoadRelaxed());
    rb_slow_path_count_total_ += rb_slow_path_count_.LoadRelaxed();
    rb_slow_path_count_gc_total_ += rb_slow_path_count_gc_.LoadRelaxed();
  }
}

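// Reference-processor callback: returns true if the object that field points to is
// marked, updating the field in place when the object has been forwarded. The fences
// around the field update are presumably what make the forwarded copy's contents
// safely visible to concurrent readers (an interpretation; the original code carries
// no comment here).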
bool ConcurrentCopying::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) {
  mirror::Object* from_ref = field->AsMirrorPtr();
  mirror::Object* to_ref = IsMarked(from_ref);
  if (to_ref == nullptr) {
    return false;
  }
  if (from_ref != to_ref) {
    QuasiAtomic::ThreadFenceRelease();
    field->Assign(to_ref);
    QuasiAtomic::ThreadFenceSequentiallyConsistent();
  }
  return true;
}

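// MarkObjectVisitor entry point; simply delegates to Mark().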
mirror::Object* ConcurrentCopying::MarkObject(mirror::Object* from_ref) {
  return Mark(from_ref);
}

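// Called when marking encounters a reference object whose referent is not yet marked;
// hands it off to the reference processor for possible clearing or enqueueing.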
void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}

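// Runs soft/weak/finalizer/phantom reference processing for this GC cycle.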
void ConcurrentCopying::ProcessReferences(Thread* self) {
  TimingLogger::ScopedTiming split("ProcessReferences", GetTimings());
  // We don't really need to lock the heap bitmap lock as we use CAS to mark in bitmaps.
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}

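// Revokes all thread-local allocation buffers in the region space.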
void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  region_space_->RevokeAllThreadLocalBuffers();
}

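// Read-barrier slow path used when measurements are enabled: counts slow-path entries,
// separately for mutator threads and the GC thread, and accumulates the time spent in
// Mark() when measure_read_barrier_slow_path_ is set.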
mirror::Object* ConcurrentCopying::MarkFromReadBarrierWithMeasurements(mirror::Object* from_ref) {
  if (Thread::Current() != thread_running_gc_) {
    rb_slow_path_count_.FetchAndAddRelaxed(1u);
  } else {
    rb_slow_path_count_gc_.FetchAndAddRelaxed(1u);
  }
  ScopedTrace tr(__FUNCTION__);
  const uint64_t start_time = measure_read_barrier_slow_path_ ? NanoTime() : 0u;
  mirror::Object* ret = Mark(from_ref);
  if (measure_read_barrier_slow_path_) {
    rb_slow_path_ns_.FetchAndAddRelaxed(NanoTime() - start_time);
  }
  return ret;
}

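// Appends read-barrier slow-path statistics to the base collector's performance dump:
// a slow-path time histogram with 99% confidence intervals, slow-path entry counts,
// and cumulative moved bytes/objects.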
void ConcurrentCopying::DumpPerformanceInfo(std::ostream& os) {
  GarbageCollector::DumpPerformanceInfo(os);
  MutexLock mu(Thread::Current(), rb_slow_path_histogram_lock_);
  if (rb_slow_path_time_histogram_.SampleSize() > 0) {
    Histogram<uint64_t>::CumulativeData cumulative_data;
    rb_slow_path_time_histogram_.CreateHistogram(&cumulative_data);
    rb_slow_path_time_histogram_.PrintConfidenceIntervals(os, 0.99, cumulative_data);
  }
  if (rb_slow_path_count_total_ > 0) {
    os << "Slow path count " << rb_slow_path_count_total_ << "\n";
  }
  if (rb_slow_path_count_gc_total_ > 0) {
    os << "GC slow path count " << rb_slow_path_count_gc_total_ << "\n";
  }
  os << "Cumulative bytes moved " << cumulative_bytes_moved_.LoadRelaxed() << "\n";
  os << "Cumulative objects moved " << cumulative_objects_moved_.LoadRelaxed() << "\n";
}

}  // namespace collector
}  // namespace gc
}  // namespace art