/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "concurrent_copying.h"

#include "art_field-inl.h"
#include "base/enums.h"
#include "base/histogram-inl.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "debugger.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
#include "image-inl.h"
#include "intern_table.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"

namespace art {
namespace gc {
namespace collector {

static constexpr size_t kDefaultGcMarkStackSize = 2 * MB;
// If kFilterModUnionCards then we attempt to filter cards that don't need to be dirty in the mod
// union table. Disabled since it does not seem to help the pause much.
static constexpr bool kFilterModUnionCards = kIsDebugBuild;
// If kDisallowReadBarrierDuringScan is true then the GC aborts if any read barriers occur during
// ConcurrentCopying::Scan. May be used to diagnose possibly unnecessary read barriers.
// Only enabled for kIsDebugBuild to avoid a performance hit.
static constexpr bool kDisallowReadBarrierDuringScan = kIsDebugBuild;
// Slow path mark stack size, increase this if the stack is getting full and it is causing
// performance problems.
static constexpr size_t kReadBarrierMarkStackSize = 512 * KB;

ConcurrentCopying::ConcurrentCopying(Heap* heap,
                                     const std::string& name_prefix,
                                     bool measure_read_barrier_slow_path)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       "concurrent copying + mark sweep"),
      region_space_(nullptr), gc_barrier_(new Barrier(0)),
      gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack",
                                                     kDefaultGcMarkStackSize,
                                                     kDefaultGcMarkStackSize)),
      rb_mark_bit_stack_(accounting::ObjectStack::Create("rb copying gc mark stack",
                                                         kReadBarrierMarkStackSize,
                                                         kReadBarrierMarkStackSize)),
      rb_mark_bit_stack_full_(false),
      mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
      thread_running_gc_(nullptr),
      is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
      region_space_bitmap_(nullptr),
      heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0), mark_stack_mode_(kMarkStackModeOff),
      weak_ref_access_enabled_(true),
      skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
      measure_read_barrier_slow_path_(measure_read_barrier_slow_path),
      rb_slow_path_ns_(0),
      rb_slow_path_count_(0),
      rb_slow_path_count_gc_(0),
      rb_slow_path_histogram_lock_("Read barrier histogram lock"),
      rb_slow_path_time_histogram_("Mutator time in read barrier slow path", 500, 32),
      rb_slow_path_count_total_(0),
      rb_slow_path_count_gc_total_(0),
      rb_table_(heap_->GetReadBarrierTable()),
      force_evacuate_all_(false),
      immune_gray_stack_lock_("concurrent copying immune gray stack lock",
                              kMarkSweepMarkStackLock) {
  static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
                "The region space size and the read barrier table region size must match");
  Thread* self = Thread::Current();
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Cache this so that we won't have to lock heap_bitmap_lock_ in
    // Mark() which could cause a nested lock on heap_bitmap_lock_
    // when GC causes a RB while doing GC or a lock order violation
    // (class_linker_lock_ and heap_bitmap_lock_).
    heap_mark_bitmap_ = heap->GetMarkBitmap();
  }
  {
    MutexLock mu(self, mark_stack_lock_);
    for (size_t i = 0; i < kMarkStackPoolSize; ++i) {
      accounting::AtomicStack<mirror::Object>* mark_stack =
          accounting::AtomicStack<mirror::Object>::Create(
              "thread local mark stack", kMarkStackSize, kMarkStackSize);
      pooled_mark_stacks_.push_back(mark_stack);
    }
  }
}

void ConcurrentCopying::MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) {
  // Used for preserving soft references, should be OK to not have a CAS here since there should be
  // no other threads which can trigger read barriers on the same referent during reference
  // processing.
  from_ref->Assign(Mark(from_ref->AsMirrorPtr()));
  DCHECK(!from_ref->IsNull());
}

ConcurrentCopying::~ConcurrentCopying() {
  STLDeleteElements(&pooled_mark_stacks_);
}

void ConcurrentCopying::RunPhases() {
  CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier);
  CHECK(!is_active_);
  is_active_ = true;
  Thread* self = Thread::Current();
  thread_running_gc_ = self;
  Locks::mutator_lock_->AssertNotHeld(self);
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    InitializePhase();
  }
  FlipThreadRoots();
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    MarkingPhase();
  }
  // Verify no from-space refs. This causes a pause.
  if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) {
    TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
    ScopedPause pause(this);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "Verifying no from-space refs";
    }
    VerifyNoFromSpaceReferences();
    if (kVerboseMode) {
      LOG(INFO) << "Done verifying no from-space refs";
    }
    CheckEmptyMarkStack();
  }
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  FinishPhase();
  CHECK(is_active_);
  is_active_ = false;
  thread_running_gc_ = nullptr;
}

void ConcurrentCopying::BindBitmaps() {
  Thread* self = Thread::Current();
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      CHECK(space->IsZygoteSpace() || space->IsImageSpace());
      immune_spaces_.AddSpace(space);
    } else if (space == region_space_) {
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create("cc region space bitmap",
                                                    space->Begin(), space->Capacity());
      region_space_bitmap_ = bitmap;
    }
  }
}

void ConcurrentCopying::InitializePhase() {
  TimingLogger::ScopedTiming split("InitializePhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC InitializePhase";
    LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-"
              << reinterpret_cast<void*>(region_space_->Limit());
  }
  CheckEmptyMarkStack();
  if (kIsDebugBuild) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    CHECK(false_gray_stack_.empty());
  }

  rb_mark_bit_stack_full_ = false;
  mark_from_read_barrier_measurements_ = measure_read_barrier_slow_path_;
  if (measure_read_barrier_slow_path_) {
    rb_slow_path_ns_.StoreRelaxed(0);
    rb_slow_path_count_.StoreRelaxed(0);
    rb_slow_path_count_gc_.StoreRelaxed(0);
  }

  immune_spaces_.Reset();
  bytes_moved_.StoreRelaxed(0);
  objects_moved_.StoreRelaxed(0);
  GcCause gc_cause = GetCurrentIteration()->GetGcCause();
  if (gc_cause == kGcCauseExplicit ||
      gc_cause == kGcCauseForNativeAlloc ||
      gc_cause == kGcCauseCollectorTransition ||
      GetCurrentIteration()->GetClearSoftReferences()) {
    force_evacuate_all_ = true;
  } else {
    force_evacuate_all_ = false;
  }
  if (kUseBakerReadBarrier) {
    updated_all_immune_objects_.StoreRelaxed(false);
    // GC may gray immune objects in the thread flip.
    gc_grays_immune_objects_ = true;
    if (kIsDebugBuild) {
      MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
      DCHECK(immune_gray_stack_.empty());
    }
  }
  BindBitmaps();
  if (kVerboseMode) {
    LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_;
    LOG(INFO) << "Largest immune region: " << immune_spaces_.GetLargestImmuneRegion().Begin()
              << "-" << immune_spaces_.GetLargestImmuneRegion().End();
    for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
      LOG(INFO) << "Immune space: " << *space;
    }
    LOG(INFO) << "GC end of InitializePhase";
  }
  // Mark all of the zygote large objects without graying them.
  MarkZygoteLargeObjects();
}

// Used to switch the thread roots of a thread from from-space refs to to-space refs.
class ConcurrentCopying::ThreadFlipVisitor : public Closure, public RootVisitor {
 public:
  ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
      : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
  }

  virtual void Run(Thread* thread) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->SetIsGcMarking(true);
    if (use_tlab_ && thread->HasTlab()) {
      if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
        // This must come before the revoke.
        size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
        reinterpret_cast<Atomic<size_t>*>(&concurrent_copying_->from_space_num_objects_at_first_pause_)->
            FetchAndAddSequentiallyConsistent(thread_local_objects);
      } else {
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
      }
    }
    if (kUseThreadLocalAllocationStack) {
      thread->RevokeThreadLocalAllocationStack();
    }
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // We can use the non-CAS VisitRoots functions below because we update thread-local GC roots
    // only.
    thread->VisitRoots(this);
    concurrent_copying_->GetBarrier().Pass(self);
  }

  void VisitRoots(mirror::Object*** roots,
                  size_t count,
                  const RootInfo& info ATTRIBUTE_UNUSED)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    for (size_t i = 0; i < count; ++i) {
      mirror::Object** root = roots[i];
      mirror::Object* ref = *root;
      if (ref != nullptr) {
        mirror::Object* to_ref = concurrent_copying_->Mark(ref);
        if (to_ref != ref) {
          *root = to_ref;
        }
      }
    }
  }

  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                  size_t count,
                  const RootInfo& info ATTRIBUTE_UNUSED)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    for (size_t i = 0; i < count; ++i) {
      mirror::CompressedReference<mirror::Object>* const root = roots[i];
      if (!root->IsNull()) {
        mirror::Object* ref = root->AsMirrorPtr();
        mirror::Object* to_ref = concurrent_copying_->Mark(ref);
        if (to_ref != ref) {
          root->Assign(to_ref);
        }
      }
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool use_tlab_;
};

// Called back from Runtime::FlipThreadRoots() during a pause.
class ConcurrentCopying::FlipCallback : public Closure {
 public:
  explicit FlipCallback(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE REQUIRES(Locks::mutator_lock_) {
    ConcurrentCopying* cc = concurrent_copying_;
    TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self);
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
    cc->SwapStacks();
    if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
      cc->RecordLiveStackFreezeSize(self);
      cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated();
      cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
    }
    cc->is_marking_ = true;
    cc->mark_stack_mode_.StoreRelaxed(ConcurrentCopying::kMarkStackModeThreadLocal);
    if (kIsDebugBuild) {
      cc->region_space_->AssertAllRegionLiveBytesZeroOrCleared();
    }
    if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
      CHECK(Runtime::Current()->IsAotCompiler());
      TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
      Runtime::Current()->VisitTransactionRoots(cc);
    }
    if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
      cc->GrayAllDirtyImmuneObjects();
      if (kIsDebugBuild) {
        // Check that all non-gray immune objects only reference immune objects.
        cc->VerifyGrayImmuneObjects();
      }
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

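// Visitor used by VerifyGrayImmuneObjects() to check that a non-gray immune object only
// references objects that are in the immune spaces or are zygote large objects.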
class ConcurrentCopying::VerifyGrayImmuneObjectsVisitor {
 public:
  explicit VerifyGrayImmuneObjectsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(ObjPtr<mirror::Object> obj, MemberOffset offset, bool /* is_static */)
      const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
    CheckReference(obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset),
                   obj, offset);
  }

  void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    CheckReference(ref->GetReferent<kWithoutReadBarrier>(),
                   ref,
                   mirror::Reference::ReferentOffset());
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    CheckReference(root->AsMirrorPtr(), nullptr, MemberOffset(0));
  }

 private:
  ConcurrentCopying* const collector_;

  void CheckReference(ObjPtr<mirror::Object> ref,
                      ObjPtr<mirror::Object> holder,
                      MemberOffset offset) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (ref != nullptr) {
      if (!collector_->immune_spaces_.ContainsObject(ref.Ptr())) {
        // Not immune, must be a zygote large object.
        CHECK(Runtime::Current()->GetHeap()->GetLargeObjectsSpace()->IsZygoteLargeObject(
            Thread::Current(), ref.Ptr()))
            << "Non gray object references non immune, non zygote large object " << ref << " "
            << PrettyTypeOf(ref) << " in holder " << holder << " " << PrettyTypeOf(holder)
            << " offset=" << offset.Uint32Value();
      } else {
        // Make sure the large object class is immune since we will never scan the large object.
        CHECK(collector_->immune_spaces_.ContainsObject(
            ref->GetClass<kVerifyNone, kWithoutReadBarrier>()));
      }
    }
  }
};

void ConcurrentCopying::VerifyGrayImmuneObjects() {
  TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
  for (auto& space : immune_spaces_.GetSpaces()) {
    DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
    accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
    VerifyGrayImmuneObjectsVisitor visitor(this);
    live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                  reinterpret_cast<uintptr_t>(space->Limit()),
                                  [&visitor](mirror::Object* obj)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      // If an object is not gray, it should only have references to things in the immune spaces.
      if (obj->GetReadBarrierPointer() != ReadBarrier::GrayPtr()) {
        obj->VisitReferences</*kVisitNativeRoots*/true,
                             kDefaultVerifyFlags,
                             kWithoutReadBarrier>(visitor, visitor);
      }
    });
  }
}

// Switch thread roots from from-space refs to to-space refs. Forward/mark the thread roots.
void ConcurrentCopying::FlipThreadRoots() {
  TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
  }
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  gc_barrier_->Init(self, 0);
  ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
  FlipCallback flip_callback(this);
  size_t barrier_count = Runtime::Current()->FlipThreadRoots(
      &thread_flip_visitor, &flip_callback, this);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  is_asserting_to_space_invariant_ = true;
  QuasiAtomic::ThreadFenceForConstructor();
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
    LOG(INFO) << "GC end of FlipThreadRoots";
  }
}

class ConcurrentCopying::GrayImmuneObjectVisitor {
 public:
  explicit GrayImmuneObjectVisitor() {}

  ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kUseBakerReadBarrier) {
      if (kIsDebugBuild) {
        Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
      }
      obj->SetReadBarrierPointer(ReadBarrier::GrayPtr());
    }
  }

  static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
    reinterpret_cast<GrayImmuneObjectVisitor*>(arg)->operator()(obj);
  }
};

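// Called during the flip pause when kGrayDirtyImmuneObjects is enabled: gray the immune-space
// objects that are on dirty cards, since those objects may point to objects outside the immune
// spaces and need to be re-scanned.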
void ConcurrentCopying::GrayAllDirtyImmuneObjects() {
  TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
  gc::Heap* const heap = Runtime::Current()->GetHeap();
  accounting::CardTable* const card_table = heap->GetCardTable();
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
    DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
    GrayImmuneObjectVisitor visitor;
    accounting::ModUnionTable* table = heap->FindModUnionTableFromSpace(space);
    // Mark all the objects on dirty cards since these may point to objects in other spaces.
    // Once these are marked, the GC will eventually clear them later.
    // The table is non-null for the boot image and zygote spaces. It is only null for application
    // image spaces.
    if (table != nullptr) {
      // TODO: Add preclean outside the pause.
      table->ClearCards();
      table->VisitObjects(GrayImmuneObjectVisitor::Callback, &visitor);
    } else {
      // TODO: Consider having a mark bitmap for app image spaces and avoid scanning during the
      // pause because app image spaces are all dirty pages anyway.
      card_table->Scan<false>(space->GetMarkBitmap(), space->Begin(), space->End(), visitor);
    }
  }
  // Since all of the objects that may point to other spaces are marked, we can avoid all the read
  // barriers in the immune spaces.
  updated_all_immune_objects_.StoreRelaxed(true);
}

void ConcurrentCopying::SwapStacks() {
  heap_->SwapStacks();
}

void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
}

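// A no-op checkpoint closure. IssueEmptyCheckpoint() runs it on every mutator thread so that the
// GC can wait until all of them have passed a synchronization point.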
class EmptyCheckpoint : public Closure {
 public:
  explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Used to visit objects in the immune spaces.
inline void ConcurrentCopying::ScanImmuneObject(mirror::Object* obj) {
  DCHECK(obj != nullptr);
  DCHECK(immune_spaces_.ContainsObject(obj));
  // Update the fields without graying it or pushing it onto the mark stack.
  Scan(obj);
}

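// Visits objects in the immune spaces. With the Baker read barrier and gray dirty immune objects,
// only objects that are currently gray are scanned and then set back to white; otherwise every
// visited object is scanned.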
class ConcurrentCopying::ImmuneSpaceScanObjVisitor {
 public:
  explicit ImmuneSpaceScanObjVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}

  ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
      if (obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
        collector_->ScanImmuneObject(obj);
        // Done scanning the object, go back to white.
        bool success = obj->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                        ReadBarrier::WhitePtr());
        CHECK(success);
      }
    } else {
      collector_->ScanImmuneObject(obj);
    }
  }

  static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
    reinterpret_cast<ImmuneSpaceScanObjVisitor*>(arg)->operator()(obj);
  }

 private:
  ConcurrentCopying* const collector_;
};

// Concurrently mark roots that are guarded by read barriers and process the mark stack.
void ConcurrentCopying::MarkingPhase() {
  TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC MarkingPhase";
  }
  Thread* self = Thread::Current();
  if (kIsDebugBuild) {
    MutexLock mu(self, *Locks::thread_list_lock_);
    CHECK(weak_ref_access_enabled_);
  }

  // Scan immune spaces.
  // Update all the fields in the immune spaces first without graying the objects so that we
  // minimize dirty pages in the immune spaces. Note mutators can concurrently access and gray some
  // of the objects.
  if (kUseBakerReadBarrier) {
    gc_grays_immune_objects_ = false;
  }
  {
    TimingLogger::ScopedTiming split2("ScanImmuneSpaces", GetTimings());
    for (auto& space : immune_spaces_.GetSpaces()) {
      DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
      accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      ImmuneSpaceScanObjVisitor visitor(this);
      if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects && table != nullptr) {
        table->VisitObjects(ImmuneSpaceScanObjVisitor::Callback, &visitor);
      } else {
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->Limit()),
                                      visitor);
      }
    }
  }
  if (kUseBakerReadBarrier) {
    // This release fence makes the field updates in the above loop visible before allowing
    // mutators to get access to immune objects without graying them first.
    updated_all_immune_objects_.StoreRelease(true);
    // Now whiten immune objects concurrently accessed and grayed by mutators. We can't do this in
    // the above loop because we would incorrectly disable the read barrier by whitening an object
    // which may point to an unscanned, white object, breaking the to-space invariant.
    //
    // Make sure no mutators are in the middle of marking an immune object before whitening immune
    // objects.
    IssueEmptyCheckpoint();
    MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
    if (kVerboseMode) {
      LOG(INFO) << "immune gray stack size=" << immune_gray_stack_.size();
    }
    for (mirror::Object* obj : immune_gray_stack_) {
      DCHECK(obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      bool success = obj->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                      ReadBarrier::WhitePtr());
      DCHECK(success);
    }
    immune_gray_stack_.clear();
  }

  {
    TimingLogger::ScopedTiming split2("VisitConcurrentRoots", GetTimings());
    Runtime::Current()->VisitConcurrentRoots(this, kVisitRootFlagAllRoots);
  }
  {
    // TODO: don't visit the transaction roots if it's not active.
    TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
    Runtime::Current()->VisitNonThreadRoots(this);
  }

  {
    TimingLogger::ScopedTiming split7("ProcessMarkStack", GetTimings());
    // We transition through three mark stack modes (thread-local, shared, GC-exclusive). The
    // primary reasons are that we need to use a checkpoint to process thread-local mark stacks,
    // but after we disable weak ref accesses, we can't use a checkpoint due to a deadlock issue
    // (running threads could block at WaitHoldingLocks), and that once we reach the point where
    // we process weak references, we can avoid using a lock when accessing the GC mark stack,
    // which makes mark stack processing more efficient.

    // Process the mark stack once in the thread local stack mode. This marks most of the live
    // objects, aside from weak ref accesses with read barriers (Reference::GetReferent() and
    // system weaks) that may happen concurrently while we are processing the mark stack and newly
    // mark/gray objects and push refs on the mark stack.
    ProcessMarkStack();
    // Switch to the shared mark stack mode. That is, revoke and process thread-local mark stacks
    // for the last time before transitioning to the shared mark stack mode, which would process new
    // refs that may have been concurrently pushed onto the mark stack during the ProcessMarkStack()
    // call above. At the same time, disable weak ref accesses using a per-thread flag. It's
    // important to do these together in a single checkpoint so that we can ensure that mutators
    // won't newly gray objects and push new refs onto the mark stack due to weak ref accesses and
    // mutators safely transition to the shared mark stack mode (without leaving unprocessed refs on
    // the thread-local mark stacks), without a race. This is why we use a thread-local weak ref
    // access flag Thread::tls32_.weak_ref_access_enabled_ instead of the global ones.
    SwitchToSharedMarkStackMode();
    CHECK(!self->GetWeakRefAccessEnabled());
    // Now that weak refs accesses are disabled, once we exhaust the shared mark stack again here
    // (which may be non-empty if there were refs found on thread-local mark stacks during the above
    // SwitchToSharedMarkStackMode() call), we won't have new refs to process, that is, mutators
    // (via read barriers) have no way to produce any more refs to process. Marking converges once
    // before we process weak refs below.
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Switch to the GC exclusive mark stack mode so that we can process the mark stack without a
    // lock from this point on.
    SwitchToGcExclusiveMarkStackMode();
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "ProcessReferences";
    }
    // Process weak references. This may produce new refs to process and have them processed via
    // ProcessMarkStack (in the GC exclusive mark stack mode).
    ProcessReferences(self);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks";
    }
    SweepSystemWeaks(self);
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks done";
    }
    // Process the mark stack here one last time because the above SweepSystemWeaks() call may have
    // marked some objects (strings) alive, as hash_set::Erase() can call the hash function for
    // arbitrary elements in the weak intern table in InternTable::Table::SweepWeaks().
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Re-enable weak ref accesses.
    ReenableWeakRefAccess(self);
    // Free data for class loaders that we unloaded.
    Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
    // Marking is done. Disable marking.
    DisableMarking();
    if (kUseBakerReadBarrier) {
      ProcessFalseGrayStack();
    }
    CheckEmptyMarkStack();
  }

  if (kIsDebugBuild) {
    MutexLock mu(self, *Locks::thread_list_lock_);
    CHECK(weak_ref_access_enabled_);
  }
  if (kVerboseMode) {
    LOG(INFO) << "GC end of MarkingPhase";
  }
}

void ConcurrentCopying::ReenableWeakRefAccess(Thread* self) {
  if (kVerboseMode) {
    LOG(INFO) << "ReenableWeakRefAccess";
  }
  // Iterate all threads (don't need to or can't use a checkpoint) and re-enable weak ref access.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    weak_ref_access_enabled_ = true;  // This is for new threads.
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      thread->SetWeakRefAccessEnabled(true);
    }
  }
  // Unblock blocking threads.
  GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
  Runtime::Current()->BroadcastForNewSystemWeaks();
}

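// Checkpoint closure run on each thread by IssueDisableMarkingCheckpoint() to clear the
// thread-local is_gc_marking flag once marking is done.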
class ConcurrentCopying::DisableMarkingCheckpoint : public Closure {
 public:
  explicit DisableMarkingCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // Disable the thread-local is_gc_marking flag.
    // Note a thread that has just started right before this checkpoint may already have this flag
    // set to false, which is ok.
    thread->SetIsGcMarking(false);
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

class ConcurrentCopying::DisableMarkingCallback : public Closure {
 public:
  explicit DisableMarkingCallback(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
    // This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
    // to avoid a race with ThreadList::Register().
    CHECK(concurrent_copying_->is_marking_);
    concurrent_copying_->is_marking_ = false;
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

void ConcurrentCopying::IssueDisableMarkingCheckpoint() {
  Thread* self = Thread::Current();
  DisableMarkingCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  DisableMarkingCallback dmc(this);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point, &dmc);
  // If there are no threads to wait for, which implies that all the checkpoint functions have
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

void ConcurrentCopying::DisableMarking() {
  // Use a checkpoint to turn off the global is_marking and the thread-local is_gc_marking flags and
  // to ensure no threads are still in the middle of a read barrier which may have a from-space ref
  // cached in a local variable.
  IssueDisableMarkingCheckpoint();
  if (kUseTableLookupReadBarrier) {
    heap_->rb_table_->ClearAll();
    DCHECK(heap_->rb_table_->IsAllCleared());
  }
  is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(1);
  mark_stack_mode_.StoreSequentiallyConsistent(kMarkStackModeOff);
}

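// Record an object to be changed back from gray to white by ProcessFalseGrayStack() once marking
// is disabled.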
void ConcurrentCopying::PushOntoFalseGrayStack(mirror::Object* ref) {
  CHECK(kUseBakerReadBarrier);
  DCHECK(ref != nullptr);
  MutexLock mu(Thread::Current(), mark_stack_lock_);
  false_gray_stack_.push_back(ref);
}

void ConcurrentCopying::ProcessFalseGrayStack() {
  CHECK(kUseBakerReadBarrier);
  // Change the objects on the false gray stack from gray to white.
  MutexLock mu(Thread::Current(), mark_stack_lock_);
  for (mirror::Object* obj : false_gray_stack_) {
    DCHECK(IsMarked(obj));
    // The object could be white here if a thread got preempted after a success at the
    // AtomicSetReadBarrierPointer in Mark(), GC started marking through it (but not finished so
    // still gray), and the thread ran to register it onto the false gray stack.
    if (obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
      bool success = obj->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                      ReadBarrier::WhitePtr());
      DCHECK(success);
    }
  }
  false_gray_stack_.clear();
}

void ConcurrentCopying::IssueEmptyCheckpoint() {
  Thread* self = Thread::Current();
  EmptyCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions have
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

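// Double the capacity of the GC mark stack by copying its contents into a temporary vector,
// resizing the stack, and pushing the entries back. Only called when the stack is full.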
void ConcurrentCopying::ExpandGcMarkStack() {
  DCHECK(gc_mark_stack_->IsFull());
  const size_t new_size = gc_mark_stack_->Capacity() * 2;
  std::vector<StackReference<mirror::Object>> temp(gc_mark_stack_->Begin(),
                                                   gc_mark_stack_->End());
  gc_mark_stack_->Resize(new_size);
  for (auto& ref : temp) {
    gc_mark_stack_->PushBack(ref.AsMirrorPtr());
  }
  DCHECK(!gc_mark_stack_->IsFull());
}

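// Push a to-space reference onto the mark stack that matches the current mark stack mode: the GC
// mark stack for the GC-running thread, a thread-local mark stack for other threads in
// thread-local mode, the shared GC mark stack (under mark_stack_lock_) in shared mode, and the GC
// mark stack without a lock in GC-exclusive mode.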
void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
  CHECK_EQ(is_mark_stack_push_disallowed_.LoadRelaxed(), 0)
      << " " << to_ref << " " << PrettyTypeOf(to_ref);
  Thread* self = Thread::Current();  // TODO: pass self as an argument from call sites?
  CHECK(thread_running_gc_ != nullptr);
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (LIKELY(mark_stack_mode == kMarkStackModeThreadLocal)) {
    if (LIKELY(self == thread_running_gc_)) {
      // If GC-running thread, use the GC mark stack instead of a thread-local mark stack.
      CHECK(self->GetThreadLocalMarkStack() == nullptr);
      if (UNLIKELY(gc_mark_stack_->IsFull())) {
        ExpandGcMarkStack();
      }
      gc_mark_stack_->PushBack(to_ref);
    } else {
      // Otherwise, use a thread-local mark stack.
      accounting::AtomicStack<mirror::Object>* tl_mark_stack = self->GetThreadLocalMarkStack();
      if (UNLIKELY(tl_mark_stack == nullptr || tl_mark_stack->IsFull())) {
        MutexLock mu(self, mark_stack_lock_);
        // Get a new thread local mark stack.
        accounting::AtomicStack<mirror::Object>* new_tl_mark_stack;
        if (!pooled_mark_stacks_.empty()) {
          // Use a pooled mark stack.
          new_tl_mark_stack = pooled_mark_stacks_.back();
          pooled_mark_stacks_.pop_back();
        } else {
          // None pooled. Create a new one.
          new_tl_mark_stack =
              accounting::AtomicStack<mirror::Object>::Create(
                  "thread local mark stack", 4 * KB, 4 * KB);
        }
        DCHECK(new_tl_mark_stack != nullptr);
        DCHECK(new_tl_mark_stack->IsEmpty());
        new_tl_mark_stack->PushBack(to_ref);
        self->SetThreadLocalMarkStack(new_tl_mark_stack);
        if (tl_mark_stack != nullptr) {
          // Store the old full stack into a vector.
          revoked_mark_stacks_.push_back(tl_mark_stack);
        }
      } else {
        tl_mark_stack->PushBack(to_ref);
      }
    }
  } else if (mark_stack_mode == kMarkStackModeShared) {
    // Access the shared GC mark stack with a lock.
    MutexLock mu(self, mark_stack_lock_);
    if (UNLIKELY(gc_mark_stack_->IsFull())) {
      ExpandGcMarkStack();
    }
    gc_mark_stack_->PushBack(to_ref);
  } else {
    CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
             static_cast<uint32_t>(kMarkStackModeGcExclusive))
        << "ref=" << to_ref
        << " self->gc_marking=" << self->GetIsGcMarking()
        << " cc->is_marking=" << is_marking_;
    CHECK(self == thread_running_gc_)
        << "Only GC-running thread should access the mark stack "
        << "in the GC exclusive mark stack mode";
    // Access the GC mark stack without a lock.
    if (UNLIKELY(gc_mark_stack_->IsFull())) {
      ExpandGcMarkStack();
    }
    gc_mark_stack_->PushBack(to_ref);
  }
}

accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() {
  return heap_->allocation_stack_.get();
}

accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
  return heap_->live_stack_.get();
}

// The following visitors are used to verify that there are no references to the from-space left
// after marking.
class ConcurrentCopying::VerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
 public:
  explicit VerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    if (kUseBakerReadBarrier) {
      CHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr())
          << "Ref " << ref << " " << PrettyTypeOf(ref)
          << " has non-white rb_ptr ";
    }
  }

  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(root != nullptr);
    operator()(root);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopying::VerifyNoFromSpaceRefsFieldVisitor {
 public:
  explicit VerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(ObjPtr<mirror::Object> obj,
                  MemberOffset offset,
                  bool is_static ATTRIBUTE_UNUSED) const
      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    VerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(ObjPtr<mirror::Class> klass,
                  ObjPtr<mirror::Reference> ref) const
      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    this->operator()(ref, mirror::Reference::ReferentOffset(), false);
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    VerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(root->AsMirrorPtr());
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopying::VerifyNoFromSpaceRefsObjectVisitor {
 public:
  explicit VerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void *arg)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    VerifyNoFromSpaceRefsFieldVisitor visitor(collector);
    obj->VisitReferences(visitor, visitor);
    if (kUseBakerReadBarrier) {
      CHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr())
          << "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer();
    }
  }

 private:
  ConcurrentCopying* const collector_;
};

// Verify that there are no from-space references left after the marking phase.
void ConcurrentCopying::VerifyNoFromSpaceReferences() {
  Thread* self = Thread::Current();
  DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
  // Verify that all threads have is_gc_marking set to false.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      CHECK(!thread->GetIsGcMarking());
    }
  }
  VerifyNoFromSpaceRefsObjectVisitor visitor(this);
  // Roots.
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    VerifyNoFromSpaceRefsVisitor ref_visitor(this);
    Runtime::Current()->VisitRoots(&ref_visitor);
  }
  // The to-space.
  region_space_->WalkToSpace(VerifyNoFromSpaceRefsObjectVisitor::ObjectCallback, this);
  // Non-moving spaces.
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->GetMarkBitmap()->Visit(visitor);
  }
  // The alloc stack.
  {
    VerifyNoFromSpaceRefsVisitor ref_visitor(this);
    for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
        it < end; ++it) {
      mirror::Object* const obj = it->AsMirrorPtr();
      if (obj != nullptr && obj->GetClass() != nullptr) {
        // TODO: need to call this only if obj is alive?
        ref_visitor(obj);
        visitor(obj);
      }
    }
  }
  // TODO: LOS. But only refs in LOS are classes.
}

1075// The following visitors are used to assert the to-space invariant.
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001076class ConcurrentCopying::AssertToSpaceInvariantRefsVisitor {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001077 public:
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001078 explicit AssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001079 : collector_(collector) {}
1080
1081 void operator()(mirror::Object* ref) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001082 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001083 if (ref == nullptr) {
1084 // OK.
1085 return;
1086 }
1087 collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
1088 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001089
1090 private:
Mathieu Chartier97509952015-07-13 14:35:43 -07001091 ConcurrentCopying* const collector_;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001092};
1093
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001094class ConcurrentCopying::AssertToSpaceInvariantFieldVisitor {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001095 public:
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001096 explicit AssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001097 : collector_(collector) {}
1098
Mathieu Chartier31e88222016-10-14 18:43:19 -07001099 void operator()(ObjPtr<mirror::Object> obj,
1100 MemberOffset offset,
1101 bool is_static ATTRIBUTE_UNUSED) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001102 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001103 mirror::Object* ref =
1104 obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001105 AssertToSpaceInvariantRefsVisitor visitor(collector_);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001106 visitor(ref);
1107 }
Mathieu Chartier31e88222016-10-14 18:43:19 -07001108 void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref ATTRIBUTE_UNUSED) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001109 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001110 CHECK(klass->IsTypeOfReferenceClass());
1111 }
1112
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001113 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001114 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001115 if (!root->IsNull()) {
1116 VisitRoot(root);
1117 }
1118 }
1119
1120 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001121 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001122 AssertToSpaceInvariantRefsVisitor visitor(collector_);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001123 visitor(root->AsMirrorPtr());
1124 }
1125
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001126 private:
Mathieu Chartier97509952015-07-13 14:35:43 -07001127 ConcurrentCopying* const collector_;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001128};
1129
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001130class ConcurrentCopying::AssertToSpaceInvariantObjectVisitor {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001131 public:
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001132 explicit AssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001133 : collector_(collector) {}
1134 void operator()(mirror::Object* obj) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001135 REQUIRES_SHARED(Locks::mutator_lock_) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001136 ObjectCallback(obj, collector_);
1137 }
1138 static void ObjectCallback(mirror::Object* obj, void *arg)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001139 REQUIRES_SHARED(Locks::mutator_lock_) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001140 CHECK(obj != nullptr);
1141 ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
1142 space::RegionSpace* region_space = collector->RegionSpace();
1143 CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
1144 collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001145 AssertToSpaceInvariantFieldVisitor visitor(collector);
Mathieu Chartier059ef3d2015-08-18 13:54:21 -07001146 obj->VisitReferences(visitor, visitor);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001147 }
1148
1149 private:
Mathieu Chartier97509952015-07-13 14:35:43 -07001150 ConcurrentCopying* const collector_;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001151};
1152
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001153class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001154 public:
Roland Levillain3887c462015-08-12 18:15:42 +01001155 RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
1156 bool disable_weak_ref_access)
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001157 : concurrent_copying_(concurrent_copying),
1158 disable_weak_ref_access_(disable_weak_ref_access) {
1159 }
1160
1161 virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
1162 // Note: self is not necessarily equal to thread since thread may be suspended.
1163 Thread* self = Thread::Current();
1164 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
1165 << thread->GetState() << " thread " << thread << " self " << self;
1166 // Revoke thread local mark stacks.
1167 accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
1168 if (tl_mark_stack != nullptr) {
1169 MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
1170 concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
1171 thread->SetThreadLocalMarkStack(nullptr);
1172 }
1173 // Disable weak ref access.
1174 if (disable_weak_ref_access_) {
1175 thread->SetWeakRefAccessEnabled(false);
1176 }
1177 // If thread is a running mutator, then act on behalf of the garbage collector.
1178 // See the code in ThreadList::RunCheckpoint.
Mathieu Chartier10d25082015-10-28 18:36:09 -07001179 concurrent_copying_->GetBarrier().Pass(self);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001180 }
1181
1182 private:
1183 ConcurrentCopying* const concurrent_copying_;
1184 const bool disable_weak_ref_access_;
1185};
1186
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -07001187void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access,
1188 Closure* checkpoint_callback) {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001189 Thread* self = Thread::Current();
1190 RevokeThreadLocalMarkStackCheckpoint check_point(this, disable_weak_ref_access);
1191 ThreadList* thread_list = Runtime::Current()->GetThreadList();
1192 gc_barrier_->Init(self, 0);
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -07001193 size_t barrier_count = thread_list->RunCheckpoint(&check_point, checkpoint_callback);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001194 // If there are no threads to wait for, which implies that all the checkpoint functions have
1195 // finished, then there is no need to release the mutator lock.
1196 if (barrier_count == 0) {
1197 return;
1198 }
1199 Locks::mutator_lock_->SharedUnlock(self);
1200 {
1201 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1202 gc_barrier_->Increment(self, barrier_count);
1203 }
1204 Locks::mutator_lock_->SharedLock(self);
1205}
1206
1207void ConcurrentCopying::RevokeThreadLocalMarkStack(Thread* thread) {
1208 Thread* self = Thread::Current();
1209 CHECK_EQ(self, thread);
1210 accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
1211 if (tl_mark_stack != nullptr) {
1212 CHECK(is_marking_);
1213 MutexLock mu(self, mark_stack_lock_);
1214 revoked_mark_stacks_.push_back(tl_mark_stack);
1215 thread->SetThreadLocalMarkStack(nullptr);
1216 }
1217}
1218
1219void ConcurrentCopying::ProcessMarkStack() {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001220 if (kVerboseMode) {
1221 LOG(INFO) << "ProcessMarkStack. ";
1222 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001223 bool empty_prev = false;
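  // Keep draining until ProcessMarkStackOnce() reports an empty stack on two consecutive passes,
  // since new entries may be pushed concurrently while a pass is in progress.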
1224 while (true) {
1225 bool empty = ProcessMarkStackOnce();
1226 if (empty_prev && empty) {
1227 // Saw empty mark stack for a second time, done.
1228 break;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001229 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001230 empty_prev = empty;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001231 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001232}
1233
1234bool ConcurrentCopying::ProcessMarkStackOnce() {
1235 Thread* self = Thread::Current();
1236 CHECK(thread_running_gc_ != nullptr);
1237 CHECK(self == thread_running_gc_);
1238 CHECK(self->GetThreadLocalMarkStack() == nullptr);
1239 size_t count = 0;
1240 MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
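  // How the stack is drained depends on the current mode: the thread-local stacks plus the GC
  // stack, the shared GC stack guarded by mark_stack_lock_, or the GC-exclusive stack that only
  // the GC-running thread touches.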
1241 if (mark_stack_mode == kMarkStackModeThreadLocal) {
1242 // Process the thread-local mark stacks and the GC mark stack.
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -07001243 count += ProcessThreadLocalMarkStacks(false, nullptr);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001244 while (!gc_mark_stack_->IsEmpty()) {
1245 mirror::Object* to_ref = gc_mark_stack_->PopBack();
1246 ProcessMarkStackRef(to_ref);
1247 ++count;
1248 }
1249 gc_mark_stack_->Reset();
1250 } else if (mark_stack_mode == kMarkStackModeShared) {
1251 // Process the shared GC mark stack with a lock.
1252 {
1253 MutexLock mu(self, mark_stack_lock_);
1254 CHECK(revoked_mark_stacks_.empty());
1255 }
1256 while (true) {
1257 std::vector<mirror::Object*> refs;
1258 {
1259 // Copy refs with lock. Note the number of refs should be small.
1260 MutexLock mu(self, mark_stack_lock_);
1261 if (gc_mark_stack_->IsEmpty()) {
1262 break;
1263 }
1264 for (StackReference<mirror::Object>* p = gc_mark_stack_->Begin();
1265 p != gc_mark_stack_->End(); ++p) {
1266 refs.push_back(p->AsMirrorPtr());
1267 }
1268 gc_mark_stack_->Reset();
1269 }
1270 for (mirror::Object* ref : refs) {
1271 ProcessMarkStackRef(ref);
1272 ++count;
1273 }
1274 }
1275 } else {
1276 CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
1277 static_cast<uint32_t>(kMarkStackModeGcExclusive));
1278 {
1279 MutexLock mu(self, mark_stack_lock_);
1280 CHECK(revoked_mark_stacks_.empty());
1281 }
1282 // Process the GC mark stack in the exclusive mode. No need to take the lock.
1283 while (!gc_mark_stack_->IsEmpty()) {
1284 mirror::Object* to_ref = gc_mark_stack_->PopBack();
1285 ProcessMarkStackRef(to_ref);
1286 ++count;
1287 }
1288 gc_mark_stack_->Reset();
1289 }
1290
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001291 // Return true if the stack was empty.
1292 return count == 0;
1293}
1294
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -07001295size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access,
1296 Closure* checkpoint_callback) {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001297 // Run a checkpoint to collect all thread local mark stacks and iterate over them all.
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -07001298 RevokeThreadLocalMarkStacks(disable_weak_ref_access, checkpoint_callback);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001299 size_t count = 0;
1300 std::vector<accounting::AtomicStack<mirror::Object>*> mark_stacks;
1301 {
1302 MutexLock mu(Thread::Current(), mark_stack_lock_);
1303 // Make a copy of the mark stack vector.
1304 mark_stacks = revoked_mark_stacks_;
1305 revoked_mark_stacks_.clear();
1306 }
1307 for (accounting::AtomicStack<mirror::Object>* mark_stack : mark_stacks) {
1308 for (StackReference<mirror::Object>* p = mark_stack->Begin(); p != mark_stack->End(); ++p) {
1309 mirror::Object* to_ref = p->AsMirrorPtr();
1310 ProcessMarkStackRef(to_ref);
1311 ++count;
1312 }
1313 {
1314 MutexLock mu(Thread::Current(), mark_stack_lock_);
1315 if (pooled_mark_stacks_.size() >= kMarkStackPoolSize) {
1316 // The pool has enough. Delete it.
1317 delete mark_stack;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001318 } else {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001319 // Otherwise, put it into the pool for later reuse.
1320 mark_stack->Reset();
1321 pooled_mark_stacks_.push_back(mark_stack);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001322 }
1323 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001324 }
1325 return count;
1326}
1327
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001328inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001329 DCHECK(!region_space_->IsInFromSpace(to_ref));
1330 if (kUseBakerReadBarrier) {
1331 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
1332 << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
1333 << " is_marked=" << IsMarked(to_ref);
1334 }
Mathieu Chartierc381c362016-08-23 13:27:53 -07001335 bool add_to_live_bytes = false;
1336 if (region_space_->IsInUnevacFromSpace(to_ref)) {
1337 // Mark the bitmap only in the GC thread here so that we don't need a CAS.
1338 if (!kUseBakerReadBarrier || !region_space_bitmap_->Set(to_ref)) {
1339 // It may already be marked if we accidentally pushed the same object twice due to the racy
1340 // bitmap read in MarkUnevacFromSpaceRegion.
1341 Scan(to_ref);
1342 // Only add to the live bytes if the object was not already marked.
1343 add_to_live_bytes = true;
1344 }
1345 } else {
1346 Scan(to_ref);
1347 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001348 if (kUseBakerReadBarrier) {
1349 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
1350 << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
1351 << " is_marked=" << IsMarked(to_ref);
1352 }
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001353#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
1354 if (UNLIKELY((to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
1355 to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr &&
1356 !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())))) {
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001357 // Leave this reference gray in the queue so that GetReferent() will trigger a read barrier. We
1358 // will change it to white later in ReferenceQueue::DequeuePendingReference().
Richard Uhlere3627402016-02-02 13:36:55 -08001359 DCHECK(to_ref->AsReference()->GetPendingNext() != nullptr) << "Left unenqueued ref gray " << to_ref;
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001360 } else {
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001361 // We may occasionally leave a reference white in the queue if its referent happens to be
1362 // concurrently marked after the Scan() call above has enqueued the Reference, in which case the
1363 // above IsInToSpace() evaluates to true and we change the color from gray to white here in this
1364 // else block.
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001365 if (kUseBakerReadBarrier) {
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001366 bool success = to_ref->AtomicSetReadBarrierPointer</*kCasRelease*/true>(
1367 ReadBarrier::GrayPtr(),
1368 ReadBarrier::WhitePtr());
1369 DCHECK(success) << "Must succeed as we won the race.";
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001370 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001371 }
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001372#else
1373 DCHECK(!kUseBakerReadBarrier);
1374#endif
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001375
Mathieu Chartierc381c362016-08-23 13:27:53 -07001376 if (add_to_live_bytes) {
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001377 // Add to the live bytes of the unevacuated from-space region. Note this code is always run by
1378 // GC-running thread (no synchronization required).
1379 DCHECK(region_space_bitmap_->Test(to_ref));
1380 // Disable the read barrier in SizeOf for performance, which is safe.
1381 size_t obj_size = to_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
1382 size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
1383 region_space_->AddLiveBytes(to_ref, alloc_size);
1384 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001385 if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001386 AssertToSpaceInvariantObjectVisitor visitor(this);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001387 visitor(to_ref);
1388 }
1389}
1390
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -07001391class ConcurrentCopying::DisableWeakRefAccessCallback : public Closure {
1392 public:
1393 explicit DisableWeakRefAccessCallback(ConcurrentCopying* concurrent_copying)
1394 : concurrent_copying_(concurrent_copying) {
1395 }
1396
1397 void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
1398 // This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
1399 // to avoid a deadlock b/31500969.
1400 CHECK(concurrent_copying_->weak_ref_access_enabled_);
1401 concurrent_copying_->weak_ref_access_enabled_ = false;
1402 }
1403
1404 private:
1405 ConcurrentCopying* const concurrent_copying_;
1406};
1407
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001408void ConcurrentCopying::SwitchToSharedMarkStackMode() {
1409 Thread* self = Thread::Current();
1410 CHECK(thread_running_gc_ != nullptr);
1411 CHECK_EQ(self, thread_running_gc_);
1412 CHECK(self->GetThreadLocalMarkStack() == nullptr);
1413 MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
1414 CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
1415 static_cast<uint32_t>(kMarkStackModeThreadLocal));
1416 mark_stack_mode_.StoreRelaxed(kMarkStackModeShared);
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -07001417 DisableWeakRefAccessCallback dwrac(this);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001418 // Process the thread local mark stacks one last time after switching to the shared mark stack
1419 // mode and disable weak ref accesses.
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -07001420 ProcessThreadLocalMarkStacks(true, &dwrac);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001421 if (kVerboseMode) {
1422 LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access";
1423 }
1424}
1425
1426void ConcurrentCopying::SwitchToGcExclusiveMarkStackMode() {
1427 Thread* self = Thread::Current();
1428 CHECK(thread_running_gc_ != nullptr);
1429 CHECK_EQ(self, thread_running_gc_);
1430 CHECK(self->GetThreadLocalMarkStack() == nullptr);
1431 MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
1432 CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
1433 static_cast<uint32_t>(kMarkStackModeShared));
1434 mark_stack_mode_.StoreRelaxed(kMarkStackModeGcExclusive);
1435 QuasiAtomic::ThreadFenceForConstructor();
1436 if (kVerboseMode) {
1437 LOG(INFO) << "Switched to GC exclusive mark stack mode";
1438 }
1439}
1440
1441void ConcurrentCopying::CheckEmptyMarkStack() {
1442 Thread* self = Thread::Current();
1443 CHECK(thread_running_gc_ != nullptr);
1444 CHECK_EQ(self, thread_running_gc_);
1445 CHECK(self->GetThreadLocalMarkStack() == nullptr);
1446 MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
1447 if (mark_stack_mode == kMarkStackModeThreadLocal) {
1448 // Thread-local mark stack mode.
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -07001449 RevokeThreadLocalMarkStacks(false, nullptr);
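    // If any revoked stack still has entries at this point, dump them below and abort.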
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001450 MutexLock mu(Thread::Current(), mark_stack_lock_);
1451 if (!revoked_mark_stacks_.empty()) {
1452 for (accounting::AtomicStack<mirror::Object>* mark_stack : revoked_mark_stacks_) {
1453 while (!mark_stack->IsEmpty()) {
1454 mirror::Object* obj = mark_stack->PopBack();
1455 if (kUseBakerReadBarrier) {
1456 mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
1457 LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj) << " rb_ptr=" << rb_ptr
1458 << " is_marked=" << IsMarked(obj);
1459 } else {
1460 LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj)
1461 << " is_marked=" << IsMarked(obj);
1462 }
1463 }
1464 }
1465 LOG(FATAL) << "mark stack is not empty";
1466 }
1467 } else {
1468 // Shared, GC-exclusive, or off.
1469 MutexLock mu(Thread::Current(), mark_stack_lock_);
1470 CHECK(gc_mark_stack_->IsEmpty());
1471 CHECK(revoked_mark_stacks_.empty());
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001472 }
1473}
1474
1475void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
1476 TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
1477 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
Mathieu Chartier97509952015-07-13 14:35:43 -07001478 Runtime::Current()->SweepSystemWeaks(this);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001479}
1480
1481void ConcurrentCopying::Sweep(bool swap_bitmaps) {
1482 {
1483 TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
1484 accounting::ObjectStack* live_stack = heap_->GetLiveStack();
1485 if (kEnableFromSpaceAccountingCheck) {
1486 CHECK_GE(live_stack_freeze_size_, live_stack->Size());
1487 }
1488 heap_->MarkAllocStackAsLive(live_stack);
1489 live_stack->Reset();
1490 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001491 CheckEmptyMarkStack();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001492 TimingLogger::ScopedTiming split("Sweep", GetTimings());
1493 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
1494 if (space->IsContinuousMemMapAllocSpace()) {
1495 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
Mathieu Chartier763a31e2015-11-16 16:05:55 -08001496 if (space == region_space_ || immune_spaces_.ContainsSpace(space)) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001497 continue;
1498 }
1499 TimingLogger::ScopedTiming split2(
1500 alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
1501 RecordFree(alloc_space->Sweep(swap_bitmaps));
1502 }
1503 }
1504 SweepLargeObjects(swap_bitmaps);
1505}
1506
Mathieu Chartier962cd7a2016-08-16 12:15:59 -07001507void ConcurrentCopying::MarkZygoteLargeObjects() {
1508 TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
1509 Thread* const self = Thread::Current();
1510 WriterMutexLock rmu(self, *Locks::heap_bitmap_lock_);
1511 space::LargeObjectSpace* const los = heap_->GetLargeObjectsSpace();
1512 // Pick the current live bitmap (mark bitmap if swapped).
1513 accounting::LargeObjectBitmap* const live_bitmap = los->GetLiveBitmap();
1514 accounting::LargeObjectBitmap* const mark_bitmap = los->GetMarkBitmap();
1515 // Walk through all of the objects and explicitly mark the zygote ones so they don't get swept.
1516 live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(los->Begin()),
1517 reinterpret_cast<uintptr_t>(los->End()),
1518 [mark_bitmap, los, self](mirror::Object* obj)
1519 REQUIRES(Locks::heap_bitmap_lock_)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001520 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartier962cd7a2016-08-16 12:15:59 -07001521 if (los->IsZygoteLargeObject(self, obj)) {
1522 mark_bitmap->Set(obj);
1523 }
1524 });
1525}
1526
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001527void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) {
1528 TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
1529 RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
1530}
1531
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001532void ConcurrentCopying::ReclaimPhase() {
1533 TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings());
1534 if (kVerboseMode) {
1535 LOG(INFO) << "GC ReclaimPhase";
1536 }
1537 Thread* self = Thread::Current();
1538
1539 {
1540 // Double-check that the mark stack is empty.
1541 // Note: need to set this after VerifyNoFromSpaceRef().
1542 is_asserting_to_space_invariant_ = false;
1543 QuasiAtomic::ThreadFenceForConstructor();
1544 if (kVerboseMode) {
1545 LOG(INFO) << "Issue an empty checkpoint.";
1546 }
1547 IssueEmptyCheckpoint();
1548 // Disable the check.
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001549 is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(0);
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001550 if (kUseBakerReadBarrier) {
1551 updated_all_immune_objects_.StoreSequentiallyConsistent(false);
1552 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001553 CheckEmptyMarkStack();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001554 }
1555
1556 {
1557 // Record freed objects.
1558 TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
1559 // Don't include thread-locals that are in the to-space.
1560 uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
1561 uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
1562 uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
1563 uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
1564 uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
Mathieu Chartiercca44a02016-08-17 10:07:29 -07001565 cumulative_bytes_moved_.FetchAndAddRelaxed(to_bytes);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001566 uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
Mathieu Chartiercca44a02016-08-17 10:07:29 -07001567 cumulative_objects_moved_.FetchAndAddRelaxed(to_objects);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001568 if (kEnableFromSpaceAccountingCheck) {
1569 CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
1570 CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
1571 }
1572 CHECK_LE(to_objects, from_objects);
1573 CHECK_LE(to_bytes, from_bytes);
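    // The freed amounts are what was allocated in the from-space minus what was moved into the
    // to-space.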
1574 int64_t freed_bytes = from_bytes - to_bytes;
1575 int64_t freed_objects = from_objects - to_objects;
1576 if (kVerboseMode) {
1577 LOG(INFO) << "RecordFree:"
1578 << " from_bytes=" << from_bytes << " from_objects=" << from_objects
1579 << " unevac_from_bytes=" << unevac_from_bytes << " unevac_from_objects=" << unevac_from_objects
1580 << " to_bytes=" << to_bytes << " to_objects=" << to_objects
1581 << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects
1582 << " from_space size=" << region_space_->FromSpaceSize()
1583 << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
1584 << " to_space size=" << region_space_->ToSpaceSize();
1585 LOG(INFO) << "(before) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
1586 }
1587 RecordFree(ObjectBytePair(freed_objects, freed_bytes));
1588 if (kVerboseMode) {
1589 LOG(INFO) << "(after) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
1590 }
1591 }
1592
1593 {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001594 TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
1595 region_space_->ClearFromSpace();
1596 }
1597
1598 {
1599 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001600 Sweep(false);
1601 SwapBitmaps();
1602 heap_->UnBindBitmaps();
1603
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001604 // Delete the region bitmap.
1605 DCHECK(region_space_bitmap_ != nullptr);
1606 delete region_space_bitmap_;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001607 region_space_bitmap_ = nullptr;
1608 }
1609
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001610 CheckEmptyMarkStack();
1611
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001612 if (kVerboseMode) {
1613 LOG(INFO) << "GC end of ReclaimPhase";
1614 }
1615}
1616
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001617// Assert the to-space invariant.
1618void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
1619 mirror::Object* ref) {
1620 CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
1621 if (is_asserting_to_space_invariant_) {
1622 if (region_space_->IsInToSpace(ref)) {
1623 // OK.
1624 return;
1625 } else if (region_space_->IsInUnevacFromSpace(ref)) {
Mathieu Chartierc381c362016-08-23 13:27:53 -07001626 CHECK(IsMarkedInUnevacFromSpace(ref)) << ref;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001627 } else if (region_space_->IsInFromSpace(ref)) {
1628 // Not OK. Do extra logging.
1629 if (obj != nullptr) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001630 LogFromSpaceRefHolder(obj, offset);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001631 }
Andreas Gampe3fec9ac2016-09-13 10:47:28 -07001632 ref->GetLockWord(false).Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001633 CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
1634 } else {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001635 AssertToSpaceInvariantInNonMovingSpace(obj, ref);
1636 }
1637 }
1638}
1639
1640class RootPrinter {
1641 public:
1642 RootPrinter() { }
1643
1644 template <class MirrorType>
1645 ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001646 REQUIRES_SHARED(Locks::mutator_lock_) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001647 if (!root->IsNull()) {
1648 VisitRoot(root);
1649 }
1650 }
1651
1652 template <class MirrorType>
1653 void VisitRoot(mirror::Object** root)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001654 REQUIRES_SHARED(Locks::mutator_lock_) {
Andreas Gampe3fec9ac2016-09-13 10:47:28 -07001655 LOG(FATAL_WITHOUT_ABORT) << "root=" << root << " ref=" << *root;
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001656 }
1657
1658 template <class MirrorType>
1659 void VisitRoot(mirror::CompressedReference<MirrorType>* root)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001660 REQUIRES_SHARED(Locks::mutator_lock_) {
Andreas Gampe3fec9ac2016-09-13 10:47:28 -07001661 LOG(FATAL_WITHOUT_ABORT) << "root=" << root << " ref=" << root->AsMirrorPtr();
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001662 }
1663};
1664
1665void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source,
1666 mirror::Object* ref) {
1667 CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
1668 if (is_asserting_to_space_invariant_) {
1669 if (region_space_->IsInToSpace(ref)) {
1670 // OK.
1671 return;
1672 } else if (region_space_->IsInUnevacFromSpace(ref)) {
Mathieu Chartierc381c362016-08-23 13:27:53 -07001673 CHECK(IsMarkedInUnevacFromSpace(ref)) << ref;
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001674 } else if (region_space_->IsInFromSpace(ref)) {
1675 // Not OK. Do extra logging.
1676 if (gc_root_source == nullptr) {
1677 // No info.
1678 } else if (gc_root_source->HasArtField()) {
1679 ArtField* field = gc_root_source->GetArtField();
Andreas Gampe3fec9ac2016-09-13 10:47:28 -07001680 LOG(FATAL_WITHOUT_ABORT) << "gc root in field " << field << " " << PrettyField(field);
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001681 RootPrinter root_printer;
1682 field->VisitRoots(root_printer);
1683 } else if (gc_root_source->HasArtMethod()) {
1684 ArtMethod* method = gc_root_source->GetArtMethod();
Andreas Gampe3fec9ac2016-09-13 10:47:28 -07001685 LOG(FATAL_WITHOUT_ABORT) << "gc root in method " << method << " " << PrettyMethod(method);
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001686 RootPrinter root_printer;
Andreas Gampe542451c2016-07-26 09:02:02 -07001687 method->VisitRoots(root_printer, kRuntimePointerSize);
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001688 }
Andreas Gampe3fec9ac2016-09-13 10:47:28 -07001689 ref->GetLockWord(false).Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
1690 region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT));
1691 PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
1692 MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), true);
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001693 CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
1694 } else {
1695 AssertToSpaceInvariantInNonMovingSpace(nullptr, ref);
1696 }
1697 }
1698}
1699
1700void ConcurrentCopying::LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) {
1701 if (kUseBakerReadBarrier) {
1702 LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj)
1703 << " holder rb_ptr=" << obj->GetReadBarrierPointer();
1704 } else {
1705 LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj);
1706 }
1707 if (region_space_->IsInFromSpace(obj)) {
1708 LOG(INFO) << "holder is in the from-space.";
1709 } else if (region_space_->IsInToSpace(obj)) {
1710 LOG(INFO) << "holder is in the to-space.";
1711 } else if (region_space_->IsInUnevacFromSpace(obj)) {
1712 LOG(INFO) << "holder is in the unevac from-space.";
Mathieu Chartierc381c362016-08-23 13:27:53 -07001713 if (IsMarkedInUnevacFromSpace(obj)) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001714 LOG(INFO) << "holder is marked in the region space bitmap.";
1715 } else {
1716 LOG(INFO) << "holder is not marked in the region space bitmap.";
1717 }
1718 } else {
1719 // In a non-moving space.
Mathieu Chartier763a31e2015-11-16 16:05:55 -08001720 if (immune_spaces_.ContainsObject(obj)) {
1721 LOG(INFO) << "holder is in an immune image or the zygote space.";
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001722 } else {
Mathieu Chartier763a31e2015-11-16 16:05:55 -08001723 LOG(INFO) << "holder is in a non-immune, non-moving (or main) space.";
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001724 accounting::ContinuousSpaceBitmap* mark_bitmap =
1725 heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
1726 accounting::LargeObjectBitmap* los_bitmap =
1727 heap_mark_bitmap_->GetLargeObjectBitmap(obj);
1728 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1729 bool is_los = mark_bitmap == nullptr;
1730 if (!is_los && mark_bitmap->Test(obj)) {
1731 LOG(INFO) << "holder is marked in the mark bit map.";
1732 } else if (is_los && los_bitmap->Test(obj)) {
1733 LOG(INFO) << "holder is marked in the los bit map.";
1734 } else {
1735 // If ref is on the allocation stack, then it is considered
1736 // marked/alive (but not necessarily on the live stack).
1737 if (IsOnAllocStack(obj)) {
1738 LOG(INFO) << "holder is on the alloc stack.";
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001739 } else {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001740 LOG(INFO) << "holder is not marked or on the alloc stack.";
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001741 }
1742 }
1743 }
1744 }
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001745 LOG(INFO) << "offset=" << offset.SizeValue();
1746}
1747
1748void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj,
1749 mirror::Object* ref) {
1750 // In a non-moving spaces. Check that the ref is marked.
Mathieu Chartier763a31e2015-11-16 16:05:55 -08001751 if (immune_spaces_.ContainsObject(ref)) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001752 if (kUseBakerReadBarrier) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001753 // An immune object may not be gray if this is called from the GC thread.
1754 if (Thread::Current() == thread_running_gc_ && !gc_grays_immune_objects_) {
1755 return;
1756 }
1757 bool updated_all_immune_objects = updated_all_immune_objects_.LoadSequentiallyConsistent();
1758 CHECK(updated_all_immune_objects || ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001759 << "Unmarked immune space ref. obj=" << obj << " rb_ptr="
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001760 << (obj != nullptr ? obj->GetReadBarrierPointer() : nullptr)
1761 << " ref=" << ref << " ref rb_ptr=" << ref->GetReadBarrierPointer()
1762 << " updated_all_immune_objects=" << updated_all_immune_objects;
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001763 }
1764 } else {
1765 accounting::ContinuousSpaceBitmap* mark_bitmap =
1766 heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
1767 accounting::LargeObjectBitmap* los_bitmap =
1768 heap_mark_bitmap_->GetLargeObjectBitmap(ref);
1769 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1770 bool is_los = mark_bitmap == nullptr;
1771 if ((!is_los && mark_bitmap->Test(ref)) ||
1772 (is_los && los_bitmap->Test(ref))) {
1773 // OK.
1774 } else {
1775 // If ref is on the allocation stack, then it may not be
1776 // marked live, but is considered marked/alive (but not
1777 // necessarily on the live stack).
1778 CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. "
1779 << "obj=" << obj << " ref=" << ref;
1780 }
1781 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001782}
1783
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001784// Used to scan ref fields of an object.
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001785class ConcurrentCopying::RefFieldsVisitor {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001786 public:
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001787 explicit RefFieldsVisitor(ConcurrentCopying* collector)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001788 : collector_(collector) {}
1789
Mathieu Chartier31e88222016-10-14 18:43:19 -07001790 void operator()(ObjPtr<mirror::Object> obj, MemberOffset offset, bool /* is_static */)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001791 const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_)
1792 REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
Mathieu Chartier31e88222016-10-14 18:43:19 -07001793 collector_->Process(obj.Ptr(), offset);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001794 }
1795
Mathieu Chartier31e88222016-10-14 18:43:19 -07001796 void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001797 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001798 CHECK(klass->IsTypeOfReferenceClass());
1799 collector_->DelayReferenceReferent(klass, ref);
1800 }
1801
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001802 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001803 ALWAYS_INLINE
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001804 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001805 if (!root->IsNull()) {
1806 VisitRoot(root);
1807 }
1808 }
1809
1810 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001811 ALWAYS_INLINE
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001812 REQUIRES_SHARED(Locks::mutator_lock_) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001813 collector_->MarkRoot</*kGrayImmuneObject*/false>(root);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001814 }
1815
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001816 private:
1817 ConcurrentCopying* const collector_;
1818};
1819
1820// Scan ref fields of an object.
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001821inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
Mathieu Chartierd6636d32016-07-28 11:02:38 -07001822 if (kDisallowReadBarrierDuringScan) {
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07001823 // Avoid all read barriers while visiting references to help performance.
1824 Thread::Current()->ModifyDebugDisallowReadBarrier(1);
1825 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001826 DCHECK(!region_space_->IsInFromSpace(to_ref));
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001827 DCHECK_EQ(Thread::Current(), thread_running_gc_);
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001828 RefFieldsVisitor visitor(this);
Hiroshi Yamauchi5496f692016-02-17 13:29:59 -08001829 // Disable the read barrier for performance reasons.
1830 to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
1831 visitor, visitor);
Mathieu Chartierd6636d32016-07-28 11:02:38 -07001832 if (kDisallowReadBarrierDuringScan) {
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07001833 Thread::Current()->ModifyDebugDisallowReadBarrier(-1);
1834 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001835}
1836
1837// Process a field.
1838inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001839 DCHECK_EQ(Thread::Current(), thread_running_gc_);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001840 mirror::Object* ref = obj->GetFieldObject<
1841 mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
Mathieu Chartierc381c362016-08-23 13:27:53 -07001842 mirror::Object* to_ref = Mark</*kGrayImmuneObject*/false, /*kFromGCThread*/true>(ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001843 if (to_ref == ref) {
1844 return;
1845 }
1846 // This may fail if the mutator writes to the field at the same time. But it's ok.
1847 mirror::Object* expected_ref = ref;
1848 mirror::Object* new_ref = to_ref;
1849 do {
1850 if (expected_ref !=
1851 obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) {
1852 // It was updated by the mutator.
1853 break;
1854 }
Hiroshi Yamauchifed3e2f2015-10-20 11:11:56 -07001855 } while (!obj->CasFieldWeakRelaxedObjectWithoutWriteBarrier<
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001856 false, false, kVerifyNone>(offset, expected_ref, new_ref));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001857}
1858
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001859// Process some roots.
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001860inline void ConcurrentCopying::VisitRoots(
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001861 mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
1862 for (size_t i = 0; i < count; ++i) {
1863 mirror::Object** root = roots[i];
1864 mirror::Object* ref = *root;
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001865 mirror::Object* to_ref = Mark(ref);
1866 if (to_ref == ref) {
Mathieu Chartier4809d0a2015-04-07 10:39:04 -07001867 continue;
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001868 }
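    // Install the to-space reference only if the root still holds the old from-space value; if
    // the mutator has already updated the root, leave the mutator's value in place.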
1869 Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
1870 mirror::Object* expected_ref = ref;
1871 mirror::Object* new_ref = to_ref;
1872 do {
1873 if (expected_ref != addr->LoadRelaxed()) {
1874 // It was updated by the mutator.
1875 break;
1876 }
Hiroshi Yamauchifed3e2f2015-10-20 11:11:56 -07001877 } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001878 }
1879}
1880
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001881template<bool kGrayImmuneObject>
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001882inline void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) {
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001883 DCHECK(!root->IsNull());
1884 mirror::Object* const ref = root->AsMirrorPtr();
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001885 mirror::Object* to_ref = Mark<kGrayImmuneObject>(ref);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001886 if (to_ref != ref) {
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001887 auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
1888 auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
1889 auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001890 // If the cas fails, then it was updated by the mutator.
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001891 do {
1892 if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
1893 // It was updated by the mutator.
1894 break;
1895 }
Hiroshi Yamauchifed3e2f2015-10-20 11:11:56 -07001896 } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001897 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001898}
1899
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001900inline void ConcurrentCopying::VisitRoots(
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001901 mirror::CompressedReference<mirror::Object>** roots, size_t count,
1902 const RootInfo& info ATTRIBUTE_UNUSED) {
1903 for (size_t i = 0; i < count; ++i) {
1904 mirror::CompressedReference<mirror::Object>* const root = roots[i];
1905 if (!root->IsNull()) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001906 // kGrayImmuneObject is true because this is used for the thread flip.
1907 MarkRoot</*kGrayImmuneObject*/true>(root);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001908 }
1909 }
1910}
1911
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001912 // Temporarily set gc_grays_immune_objects_ to true in a scope if the current thread is the GC.
1913class ConcurrentCopying::ScopedGcGraysImmuneObjects {
1914 public:
1915 explicit ScopedGcGraysImmuneObjects(ConcurrentCopying* collector)
1916 : collector_(collector), enabled_(false) {
1917 if (kUseBakerReadBarrier &&
1918 collector_->thread_running_gc_ == Thread::Current() &&
1919 !collector_->gc_grays_immune_objects_) {
1920 collector_->gc_grays_immune_objects_ = true;
1921 enabled_ = true;
1922 }
1923 }
1924
1925 ~ScopedGcGraysImmuneObjects() {
1926 if (kUseBakerReadBarrier &&
1927 collector_->thread_running_gc_ == Thread::Current() &&
1928 enabled_) {
1929 DCHECK(collector_->gc_grays_immune_objects_);
1930 collector_->gc_grays_immune_objects_ = false;
1931 }
1932 }
1933
1934 private:
1935 ConcurrentCopying* const collector_;
1936 bool enabled_;
1937};
1938
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001939// Fill the given memory block with a dummy object. Used to fill in a
1940 // copy of an object that was lost in a race.
1941void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001942 // GC doesn't gray immune objects while scanning immune objects. But we need to trigger the read
1943 // barriers here because we need the updated reference to the int array class, etc. Temporary set
1944 // gc_grays_immune_objects_ to true so that we won't cause a DCHECK failure in MarkImmuneSpace().
1945 ScopedGcGraysImmuneObjects scoped_gc_gray_immune_objects(this);
Roland Levillain14d90572015-07-16 10:52:26 +01001946 CHECK_ALIGNED(byte_size, kObjectAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001947 memset(dummy_obj, 0, byte_size);
Mathieu Chartierd6636d32016-07-28 11:02:38 -07001948 // Avoid going through the read barrier since kDisallowReadBarrierDuringScan may be enabled.
1949 // Explicitly mark to make sure to get an object in the to-space.
1950 mirror::Class* int_array_class = down_cast<mirror::Class*>(
1951 Mark(mirror::IntArray::GetArrayClass<kWithoutReadBarrier>()));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001952 CHECK(int_array_class != nullptr);
1953 AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07001954 size_t component_size = int_array_class->GetComponentSize<kWithoutReadBarrier>();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001955 CHECK_EQ(component_size, sizeof(int32_t));
1956 size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
1957 if (data_offset > byte_size) {
1958 // An int array is too big. Use java.lang.Object.
Mathieu Chartierc4f39252016-10-05 18:32:08 -07001959 ObjPtr<mirror::Class> java_lang_Object =
1960 WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object);
1961 AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object.Ptr());
Mathieu Chartierd6636d32016-07-28 11:02:38 -07001962 CHECK_EQ(byte_size, (java_lang_Object->GetObjectSize<kVerifyNone, kWithoutReadBarrier>()));
Mathieu Chartierc4f39252016-10-05 18:32:08 -07001963 dummy_obj->SetClass(java_lang_Object.Ptr());
Mathieu Chartierd6636d32016-07-28 11:02:38 -07001964 CHECK_EQ(byte_size, (dummy_obj->SizeOf<kVerifyNone, kWithoutReadBarrier>()));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001965 } else {
1966 // Use an int array.
1967 dummy_obj->SetClass(int_array_class);
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07001968 CHECK((dummy_obj->IsArrayInstance<kVerifyNone, kWithoutReadBarrier>()));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001969 int32_t length = (byte_size - data_offset) / component_size;
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07001970 mirror::Array* dummy_arr = dummy_obj->AsArray<kVerifyNone, kWithoutReadBarrier>();
1971 dummy_arr->SetLength(length);
1972 CHECK_EQ(dummy_arr->GetLength(), length)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001973 << "byte_size=" << byte_size << " length=" << length
1974 << " component_size=" << component_size << " data_offset=" << data_offset;
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07001975 CHECK_EQ(byte_size, (dummy_obj->SizeOf<kVerifyNone, kWithoutReadBarrier>()))
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001976 << "byte_size=" << byte_size << " length=" << length
1977 << " component_size=" << component_size << " data_offset=" << data_offset;
1978 }
1979}
1980
1981 // Reuse the memory blocks that were copies of objects lost in races.
1982mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
1983 // Try to reuse the blocks that were unused due to CAS failures.
Roland Levillain14d90572015-07-16 10:52:26 +01001984 CHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001985 Thread* self = Thread::Current();
1986 size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
Mathieu Chartierd6636d32016-07-28 11:02:38 -07001987 size_t byte_size;
1988 uint8_t* addr;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001989 {
Mathieu Chartierd6636d32016-07-28 11:02:38 -07001990 MutexLock mu(self, skipped_blocks_lock_);
1991 auto it = skipped_blocks_map_.lower_bound(alloc_size);
1992 if (it == skipped_blocks_map_.end()) {
1993 // Not found.
1994 return nullptr;
1995 }
1996 byte_size = it->first;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001997 CHECK_GE(byte_size, alloc_size);
1998 if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
1999 // If remainder would be too small for a dummy object, retry with a larger request size.
2000 it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
2001 if (it == skipped_blocks_map_.end()) {
2002 // Not found.
2003 return nullptr;
2004 }
Roland Levillain14d90572015-07-16 10:52:26 +01002005 CHECK_ALIGNED(it->first - alloc_size, space::RegionSpace::kAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002006 CHECK_GE(it->first - alloc_size, min_object_size)
2007 << "byte_size=" << byte_size << " it->first=" << it->first << " alloc_size=" << alloc_size;
2008 }
Mathieu Chartierd6636d32016-07-28 11:02:38 -07002009 // Found a block.
2010 CHECK(it != skipped_blocks_map_.end());
2011 byte_size = it->first;
2012 addr = it->second;
2013 CHECK_GE(byte_size, alloc_size);
2014 CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
2015 CHECK_ALIGNED(byte_size, space::RegionSpace::kAlignment);
2016 if (kVerboseMode) {
2017 LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
2018 }
2019 skipped_blocks_map_.erase(it);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002020 }
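  // Zero the reused block so the new allocation starts from cleared memory.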
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002021 memset(addr, 0, byte_size);
2022 if (byte_size > alloc_size) {
2023 // Return the remainder to the map.
Roland Levillain14d90572015-07-16 10:52:26 +01002024 CHECK_ALIGNED(byte_size - alloc_size, space::RegionSpace::kAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002025 CHECK_GE(byte_size - alloc_size, min_object_size);
Mathieu Chartierd6636d32016-07-28 11:02:38 -07002026 // FillWithDummyObject may mark an object, avoid holding skipped_blocks_lock_ to prevent lock
2027 // violation and possible deadlock. The deadlock case is a recursive case:
2028 // FillWithDummyObject -> IntArray::GetArrayClass -> Mark -> Copy -> AllocateInSkippedBlock.
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002029 FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
2030 byte_size - alloc_size);
2031 CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
Mathieu Chartierd6636d32016-07-28 11:02:38 -07002032 {
2033 MutexLock mu(self, skipped_blocks_lock_);
2034 skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
2035 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002036 }
2037 return reinterpret_cast<mirror::Object*>(addr);
2038}
2039
2040mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) {
2041 DCHECK(region_space_->IsInFromSpace(from_ref));
2042 // No read barrier to avoid nested RB that might violate the to-space
2043 // invariant. Note that from_ref is a from space ref so the SizeOf()
2044 // call will access the from-space meta objects, but it's ok and necessary.
2045 size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
2046 size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
2047 size_t region_space_bytes_allocated = 0U;
2048 size_t non_moving_space_bytes_allocated = 0U;
2049 size_t bytes_allocated = 0U;
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07002050 size_t dummy;
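  // Try the to-space of the region space first; on failure, fall back to reusing a skipped block
  // and then to the non-moving space.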
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002051 mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07002052 region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002053 bytes_allocated = region_space_bytes_allocated;
2054 if (to_ref != nullptr) {
2055 DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
2056 }
2057 bool fall_back_to_non_moving = false;
2058 if (UNLIKELY(to_ref == nullptr)) {
2059 // Failed to allocate in the region space. Try the skipped blocks.
2060 to_ref = AllocateInSkippedBlock(region_space_alloc_size);
2061 if (to_ref != nullptr) {
2062 // Succeeded to allocate in a skipped block.
2063 if (heap_->use_tlab_) {
2064 // This is necessary for the TLAB case as the allocation is not accounted for in the space.
2065 region_space_->RecordAlloc(to_ref);
2066 }
2067 bytes_allocated = region_space_alloc_size;
2068 } else {
2069 // Fall back to the non-moving space.
2070 fall_back_to_non_moving = true;
2071 if (kVerboseMode) {
2072 LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
2073 << to_space_bytes_skipped_.LoadSequentiallyConsistent()
2074 << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent();
2075 }
2077 to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07002078 &non_moving_space_bytes_allocated, nullptr, &dummy);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002079 CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed";
2080 bytes_allocated = non_moving_space_bytes_allocated;
2081 // Mark it in the mark bitmap.
2082 accounting::ContinuousSpaceBitmap* mark_bitmap =
2083 heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
2084 CHECK(mark_bitmap != nullptr);
2085 CHECK(!mark_bitmap->AtomicTestAndSet(to_ref));
2086 }
2087 }
2088 DCHECK(to_ref != nullptr);
2089
Mathieu Chartierd818adb2016-09-15 13:12:47 -07002090 // Copy the object excluding the lock word since that is handled in the loop.
2091 to_ref->SetClass(from_ref->GetClass<kVerifyNone, kWithoutReadBarrier>());
2092 const size_t kObjectHeaderSize = sizeof(mirror::Object);
2093 DCHECK_GE(obj_size, kObjectHeaderSize);
2094 static_assert(kObjectHeaderSize == sizeof(mirror::HeapReference<mirror::Class>) +
2095 sizeof(LockWord),
2096 "Object header size does not match");
2097 // Memcpy may tear words since it can copy byte by byte. It is only safe to do this since the
2098 // object in the from space is immutable other than the lock word. b/31423258
2099 memcpy(reinterpret_cast<uint8_t*>(to_ref) + kObjectHeaderSize,
2100 reinterpret_cast<const uint8_t*>(from_ref) + kObjectHeaderSize,
2101 obj_size - kObjectHeaderSize);
2102
  // Attempt to install the forward pointer. This is in a loop as the
  // lock word atomic write can fail.
  while (true) {
    LockWord old_lock_word = from_ref->GetLockWord(false);

    if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
      // Lost the race. Another thread (either GC or mutator) stored
      // the forwarding pointer first. Make the lost copy (to_ref)
      // look like a valid but dead (dummy) object and keep it for
      // future reuse.
      FillWithDummyObject(to_ref, bytes_allocated);
      if (!fall_back_to_non_moving) {
        DCHECK(region_space_->IsInToSpace(to_ref));
        if (bytes_allocated > space::RegionSpace::kRegionSize) {
          // Free the large alloc.
          region_space_->FreeLarge(to_ref, bytes_allocated);
        } else {
          // Record the lost copy for later reuse.
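          // The block stays in skipped_blocks_map_, keyed by its size, so that
          // AllocateInSkippedBlock() can hand it out for a later copy in this GC cycle.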
          heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated);
          to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated);
          to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1);
          MutexLock mu(Thread::Current(), skipped_blocks_lock_);
          skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
                                                    reinterpret_cast<uint8_t*>(to_ref)));
        }
      } else {
        DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
        DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
        // Free the non-moving-space chunk.
        accounting::ContinuousSpaceBitmap* mark_bitmap =
            heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
        CHECK(mark_bitmap != nullptr);
        CHECK(mark_bitmap->Clear(to_ref));
        heap_->non_moving_space_->Free(Thread::Current(), to_ref);
      }

      // Get the winner's forward ptr.
      mirror::Object* lost_fwd_ptr = to_ref;
      to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress());
      CHECK(to_ref != nullptr);
      CHECK_NE(to_ref, lost_fwd_ptr);
      CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
          << "to_ref=" << to_ref << " " << heap_->DumpSpaces();
      CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
      return to_ref;
    }

    // Copy the old lock word over since we did not copy it yet.
    to_ref->SetLockWord(old_lock_word, false);
    // Set the gray ptr.
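    // Under the Baker read barrier, gray means the object's fields have not been scanned yet; the
    // copy is published gray and is pushed onto the mark stack below once the CAS succeeds.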
    if (kUseBakerReadBarrier) {
      to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr());
    }

    LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));

    // Try to atomically write the fwd ptr.
    bool success = from_ref->CasLockWordWeakSequentiallyConsistent(old_lock_word, new_lock_word);
    if (LIKELY(success)) {
      // The CAS succeeded.
      objects_moved_.FetchAndAddSequentiallyConsistent(1);
      bytes_moved_.FetchAndAddSequentiallyConsistent(region_space_alloc_size);
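      // Note: bytes_moved_ is incremented by the region-space-aligned size even when the copy
      // actually landed in the non-moving space via the fall-back path.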
      if (LIKELY(!fall_back_to_non_moving)) {
        DCHECK(region_space_->IsInToSpace(to_ref));
      } else {
        DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
        DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
      }
      if (kUseBakerReadBarrier) {
        DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      }
      DCHECK(GetFwdPtr(from_ref) == to_ref);
      CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
      PushOntoMarkStack(to_ref);
      return to_ref;
    } else {
      // The CAS failed. It may have lost the race or may have failed
      // due to monitor/hashcode ops. Either way, retry.
    }
  }
}

mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
  DCHECK(from_ref != nullptr);
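  // Cases: to-space objects are trivially marked; from-space objects are marked iff they have a
  // forwarding pointer; unevacuated from-space objects use the unevac mark state; everything else
  // is either immune (hence alive) or checked against the mark bitmaps / allocation stack.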
  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
  if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
    // It's already marked.
    return from_ref;
  }
  mirror::Object* to_ref;
  if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
    to_ref = GetFwdPtr(from_ref);
    DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) ||
           heap_->non_moving_space_->HasAddress(to_ref))
        << "from_ref=" << from_ref << " to_ref=" << to_ref;
  } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
    if (IsMarkedInUnevacFromSpace(from_ref)) {
      to_ref = from_ref;
    } else {
      to_ref = nullptr;
    }
  } else {
    // from_ref is in a non-moving space.
    if (immune_spaces_.ContainsObject(from_ref)) {
      // An immune object is alive.
      to_ref = from_ref;
    } else {
      // Non-immune non-moving space. Use the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
      accounting::LargeObjectBitmap* los_bitmap =
          heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
      bool is_los = mark_bitmap == nullptr;
      if (!is_los && mark_bitmap->Test(from_ref)) {
        // Already marked.
        to_ref = from_ref;
      } else if (is_los && los_bitmap->Test(from_ref)) {
        // Already marked in LOS.
        to_ref = from_ref;
      } else {
        // Not marked.
        if (IsOnAllocStack(from_ref)) {
          // If on the allocation stack, it's considered marked.
          to_ref = from_ref;
        } else {
          // Not marked.
          to_ref = nullptr;
        }
      }
    }
  }
  return to_ref;
}

bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
  QuasiAtomic::ThreadFenceAcquire();
  accounting::ObjectStack* alloc_stack = GetAllocationStack();
  return alloc_stack->Contains(ref);
}

mirror::Object* ConcurrentCopying::MarkNonMoving(mirror::Object* ref) {
  // ref is in a non-moving space (from_ref == to_ref).
  DCHECK(!region_space_->HasAddress(ref)) << ref;
  DCHECK(!immune_spaces_.ContainsObject(ref));
  // Use the mark bitmap.
  accounting::ContinuousSpaceBitmap* mark_bitmap =
      heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
  accounting::LargeObjectBitmap* los_bitmap =
      heap_mark_bitmap_->GetLargeObjectBitmap(ref);
  CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
  bool is_los = mark_bitmap == nullptr;
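  // An object not covered by any continuous-space mark bitmap must be in the large object space,
  // so it is tracked via the LOS bitmap instead.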
  if (!is_los && mark_bitmap->Test(ref)) {
    // Already marked.
    if (kUseBakerReadBarrier) {
      DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
             ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
    }
  } else if (is_los && los_bitmap->Test(ref)) {
    // Already marked in LOS.
    if (kUseBakerReadBarrier) {
      DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
             ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
    }
  } else {
    // Not marked.
    if (IsOnAllocStack(ref)) {
      // If it's on the allocation stack, it's considered marked. Keep it white.
      // Objects on the allocation stack need not be marked.
      if (!is_los) {
        DCHECK(!mark_bitmap->Test(ref));
      } else {
        DCHECK(!los_bitmap->Test(ref));
      }
      if (kUseBakerReadBarrier) {
        DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr());
      }
    } else {
      // For the baker-style RB, we need to handle 'false-gray' cases. See the
      // kRegionTypeUnevacFromSpace-case comment in Mark().
      if (kUseBakerReadBarrier) {
        // Test the bitmap first to reduce the chance of false gray cases.
        if ((!is_los && mark_bitmap->Test(ref)) ||
            (is_los && los_bitmap->Test(ref))) {
          return ref;
        }
      }
      // Not marked or on the allocation stack. Try to mark it.
      // This may or may not succeed, which is ok.
      bool cas_success = false;
      if (kUseBakerReadBarrier) {
        cas_success = ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(),
                                                       ReadBarrier::GrayPtr());
      }
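      // The gray CAS above and the bitmap test-and-set below are separate steps, so this thread
      // may gray an object that another thread has already marked. Such 'false gray' objects are
      // recorded on the false-gray stack so that their color can be corrected later.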
      if (!is_los && mark_bitmap->AtomicTestAndSet(ref)) {
        // Already marked.
        if (kUseBakerReadBarrier && cas_success &&
            ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
          PushOntoFalseGrayStack(ref);
        }
      } else if (is_los && los_bitmap->AtomicTestAndSet(ref)) {
        // Already marked in LOS.
        if (kUseBakerReadBarrier && cas_success &&
            ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
          PushOntoFalseGrayStack(ref);
        }
      } else {
        // Newly marked.
        if (kUseBakerReadBarrier) {
          DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
        }
        PushOntoMarkStack(ref);
      }
    }
  }
  return ref;
}

void ConcurrentCopying::FinishPhase() {
  Thread* const self = Thread::Current();
  {
    MutexLock mu(self, mark_stack_lock_);
    CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
  }
  region_space_ = nullptr;
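  // Skipped to-space blocks are only reused within a single collection; drop any leftovers so
  // that the next GC cycle starts with an empty map.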
  {
    MutexLock mu(Thread::Current(), skipped_blocks_lock_);
    skipped_blocks_map_.clear();
  }
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    {
      WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
      heap_->ClearMarkedObjects();
    }
    if (kUseBakerReadBarrier && kFilterModUnionCards) {
      TimingLogger::ScopedTiming split("FilterModUnionCards", GetTimings());
      ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
      gc::Heap* const heap = Runtime::Current()->GetHeap();
      for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
        DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
        accounting::ModUnionTable* table = heap->FindModUnionTableFromSpace(space);
        // Filter out cards that don't need to be set.
        if (table != nullptr) {
          table->FilterCards();
        }
      }
    }
    if (kUseBakerReadBarrier) {
      TimingLogger::ScopedTiming split("EmptyRBMarkBitStack", GetTimings());
      DCHECK(rb_mark_bit_stack_.get() != nullptr);
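      // Objects whose lock-word mark bit was set via the read-barrier slow path during this GC
      // were recorded on rb_mark_bit_stack_; clear those bits so nothing stays marked into the
      // next cycle.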
      const auto* limit = rb_mark_bit_stack_->End();
      for (StackReference<mirror::Object>* it = rb_mark_bit_stack_->Begin(); it != limit; ++it) {
        CHECK(it->AsMirrorPtr()->AtomicSetMarkBit(1, 0));
      }
      rb_mark_bit_stack_->Reset();
    }
  }
  if (measure_read_barrier_slow_path_) {
    MutexLock mu(self, rb_slow_path_histogram_lock_);
    rb_slow_path_time_histogram_.AdjustAndAddValue(rb_slow_path_ns_.LoadRelaxed());
    rb_slow_path_count_total_ += rb_slow_path_count_.LoadRelaxed();
    rb_slow_path_count_gc_total_ += rb_slow_path_count_gc_.LoadRelaxed();
  }
}

bool ConcurrentCopying::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) {
  mirror::Object* from_ref = field->AsMirrorPtr();
  mirror::Object* to_ref = IsMarked(from_ref);
  if (to_ref == nullptr) {
    return false;
  }
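  // If the object moved, repair the reference in place so it points at the to-space copy. The
  // fences conservatively order the copied object's contents with respect to publishing the
  // updated reference to other threads.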
  if (from_ref != to_ref) {
    QuasiAtomic::ThreadFenceRelease();
    field->Assign(to_ref);
    QuasiAtomic::ThreadFenceSequentiallyConsistent();
  }
  return true;
}

mirror::Object* ConcurrentCopying::MarkObject(mirror::Object* from_ref) {
  return Mark(from_ref);
}

void ConcurrentCopying::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                                               ObjPtr<mirror::Reference> reference) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}

void ConcurrentCopying::ProcessReferences(Thread* self) {
  TimingLogger::ScopedTiming split("ProcessReferences", GetTimings());
  // We don't really need to lock the heap bitmap lock as we use CAS to mark in bitmaps.
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}

void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  region_space_->RevokeAllThreadLocalBuffers();
}

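// Slow-path wrapper around Mark() used for read barriers when measurement is requested: it counts
// slow-path hits (separately for the GC thread and for mutators) and, when
// measure_read_barrier_slow_path_ is enabled, accumulates the time spent; DumpPerformanceInfo()
// reports these below.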
mirror::Object* ConcurrentCopying::MarkFromReadBarrierWithMeasurements(mirror::Object* from_ref) {
  if (Thread::Current() != thread_running_gc_) {
    rb_slow_path_count_.FetchAndAddRelaxed(1u);
  } else {
    rb_slow_path_count_gc_.FetchAndAddRelaxed(1u);
  }
  ScopedTrace tr(__FUNCTION__);
  const uint64_t start_time = measure_read_barrier_slow_path_ ? NanoTime() : 0u;
  mirror::Object* ret = Mark(from_ref);
  if (measure_read_barrier_slow_path_) {
    rb_slow_path_ns_.FetchAndAddRelaxed(NanoTime() - start_time);
  }
  return ret;
}

void ConcurrentCopying::DumpPerformanceInfo(std::ostream& os) {
  GarbageCollector::DumpPerformanceInfo(os);
  MutexLock mu(Thread::Current(), rb_slow_path_histogram_lock_);
  if (rb_slow_path_time_histogram_.SampleSize() > 0) {
    Histogram<uint64_t>::CumulativeData cumulative_data;
    rb_slow_path_time_histogram_.CreateHistogram(&cumulative_data);
    rb_slow_path_time_histogram_.PrintConfidenceIntervals(os, 0.99, cumulative_data);
  }
  if (rb_slow_path_count_total_ > 0) {
    os << "Slow path count " << rb_slow_path_count_total_ << "\n";
  }
  if (rb_slow_path_count_gc_total_ > 0) {
    os << "GC slow path count " << rb_slow_path_count_gc_total_ << "\n";
  }
  os << "Cumulative bytes moved " << cumulative_bytes_moved_.LoadRelaxed() << "\n";
  os << "Cumulative objects moved " << cumulative_objects_moved_.LoadRelaxed() << "\n";
}

}  // namespace collector
}  // namespace gc
}  // namespace art