/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "concurrent_copying.h"

#include "art_field-inl.h"
#include "base/histogram-inl.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "debugger.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
#include "image-inl.h"
#include "intern_table.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"

namespace art {
namespace gc {
namespace collector {

static constexpr size_t kDefaultGcMarkStackSize = 2 * MB;

ConcurrentCopying::ConcurrentCopying(Heap* heap,
                                     const std::string& name_prefix,
                                     bool measure_read_barrier_slow_path)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       "concurrent copying + mark sweep"),
      region_space_(nullptr), gc_barrier_(new Barrier(0)),
      gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack",
                                                     kDefaultGcMarkStackSize,
                                                     kDefaultGcMarkStackSize)),
      mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
      thread_running_gc_(nullptr),
      is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
      region_space_bitmap_(nullptr),
      heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0), mark_stack_mode_(kMarkStackModeOff),
      weak_ref_access_enabled_(true),
      skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
      measure_read_barrier_slow_path_(measure_read_barrier_slow_path),
      rb_slow_path_ns_(0),
      rb_slow_path_count_(0),
      rb_slow_path_count_gc_(0),
      rb_slow_path_histogram_lock_("Read barrier histogram lock"),
      rb_slow_path_time_histogram_("Mutator time in read barrier slow path", 500, 32),
      rb_slow_path_count_total_(0),
      rb_slow_path_count_gc_total_(0),
      rb_table_(heap_->GetReadBarrierTable()),
      force_evacuate_all_(false),
      immune_gray_stack_lock_("concurrent copying immune gray stack lock",
                              kMarkSweepMarkStackLock) {
  static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
                "The region space size and the read barrier table region size must match");
  Thread* self = Thread::Current();
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Cache this so that we won't have to lock heap_bitmap_lock_ in
    // Mark(), which could cause a nested lock on heap_bitmap_lock_
    // when a read barrier fires during GC, or a lock order violation
    // (class_linker_lock_ and heap_bitmap_lock_).
    heap_mark_bitmap_ = heap->GetMarkBitmap();
  }
  {
    MutexLock mu(self, mark_stack_lock_);
    for (size_t i = 0; i < kMarkStackPoolSize; ++i) {
      accounting::AtomicStack<mirror::Object>* mark_stack =
          accounting::AtomicStack<mirror::Object>::Create(
              "thread local mark stack", kMarkStackSize, kMarkStackSize);
      pooled_mark_stacks_.push_back(mark_stack);
    }
  }
}

void ConcurrentCopying::MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) {
  // Used for preserving soft references. It should be OK not to use a CAS here since no other
  // threads can trigger read barriers on the same referent during reference processing.
  from_ref->Assign(Mark(from_ref->AsMirrorPtr()));
  DCHECK(!from_ref->IsNull());
}

ConcurrentCopying::~ConcurrentCopying() {
  STLDeleteElements(&pooled_mark_stacks_);
}

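// Run one full GC cycle: initialize, flip the thread roots (pause), mark concurrently,
// optionally verify that no from-space refs remain (pause), then reclaim and finish.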
void ConcurrentCopying::RunPhases() {
  CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier);
  CHECK(!is_active_);
  is_active_ = true;
  Thread* self = Thread::Current();
  thread_running_gc_ = self;
  Locks::mutator_lock_->AssertNotHeld(self);
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    InitializePhase();
  }
  FlipThreadRoots();
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    MarkingPhase();
  }
  // Verify no from-space refs. This causes a pause.
  if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) {
    TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
    ScopedPause pause(this);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "Verifying no from-space refs";
    }
    VerifyNoFromSpaceReferences();
    if (kVerboseMode) {
      LOG(INFO) << "Done verifying no from-space refs";
    }
    CheckEmptyMarkStack();
  }
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  FinishPhase();
  CHECK(is_active_);
  is_active_ = false;
  thread_running_gc_ = nullptr;
}

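// Record the spaces we never collect (image and zygote spaces) as immune and create the mark
// bitmap for the region space.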
void ConcurrentCopying::BindBitmaps() {
  Thread* self = Thread::Current();
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      CHECK(space->IsZygoteSpace() || space->IsImageSpace());
      immune_spaces_.AddSpace(space);
    } else if (space == region_space_) {
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create("cc region space bitmap",
                                                    space->Begin(), space->Capacity());
      region_space_bitmap_ = bitmap;
    }
  }
}

void ConcurrentCopying::InitializePhase() {
  TimingLogger::ScopedTiming split("InitializePhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC InitializePhase";
    LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-"
              << reinterpret_cast<void*>(region_space_->Limit());
  }
  CheckEmptyMarkStack();
  if (kIsDebugBuild) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    CHECK(false_gray_stack_.empty());
  }

  mark_from_read_barrier_measurements_ = measure_read_barrier_slow_path_;
  if (measure_read_barrier_slow_path_) {
    rb_slow_path_ns_.StoreRelaxed(0);
    rb_slow_path_count_.StoreRelaxed(0);
    rb_slow_path_count_gc_.StoreRelaxed(0);
  }

  immune_spaces_.Reset();
  bytes_moved_.StoreRelaxed(0);
  objects_moved_.StoreRelaxed(0);
  if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
      GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
      GetCurrentIteration()->GetClearSoftReferences()) {
    force_evacuate_all_ = true;
  } else {
    force_evacuate_all_ = false;
  }
  if (kUseBakerReadBarrier) {
    updated_all_immune_objects_.StoreRelaxed(false);
    // GC may gray immune objects in the thread flip.
    gc_grays_immune_objects_ = true;
    if (kIsDebugBuild) {
      MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
      DCHECK(immune_gray_stack_.empty());
    }
  }
  BindBitmaps();
  if (kVerboseMode) {
    LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_;
    LOG(INFO) << "Largest immune region: " << immune_spaces_.GetLargestImmuneRegion().Begin()
              << "-" << immune_spaces_.GetLargestImmuneRegion().End();
    for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
      LOG(INFO) << "Immune space: " << *space;
    }
    LOG(INFO) << "GC end of InitializePhase";
  }
}

// Used to switch the thread roots of a thread from from-space refs to to-space refs.
class ConcurrentCopying::ThreadFlipVisitor : public Closure {
 public:
  ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
      : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
  }

  virtual void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->SetIsGcMarking(true);
    if (use_tlab_ && thread->HasTlab()) {
      if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
        // This must come before the revoke.
        size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
        reinterpret_cast<Atomic<size_t>*>(&concurrent_copying_->from_space_num_objects_at_first_pause_)->
            FetchAndAddSequentiallyConsistent(thread_local_objects);
      } else {
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
      }
    }
    if (kUseThreadLocalAllocationStack) {
      thread->RevokeThreadLocalAllocationStack();
    }
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    thread->VisitRoots(concurrent_copying_);
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool use_tlab_;
};

// Called back from Runtime::FlipThreadRoots() during a pause.
class ConcurrentCopying::FlipCallback : public Closure {
 public:
  explicit FlipCallback(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE REQUIRES(Locks::mutator_lock_) {
    ConcurrentCopying* cc = concurrent_copying_;
    TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self);
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
    cc->SwapStacks();
    if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
      cc->RecordLiveStackFreezeSize(self);
      cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated();
      cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
    }
    cc->is_marking_ = true;
    cc->mark_stack_mode_.StoreRelaxed(ConcurrentCopying::kMarkStackModeThreadLocal);
    if (kIsDebugBuild) {
      cc->region_space_->AssertAllRegionLiveBytesZeroOrCleared();
    }
    if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
      CHECK(Runtime::Current()->IsAotCompiler());
      TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
      Runtime::Current()->VisitTransactionRoots(cc);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Switch the thread roots from from-space refs to to-space refs. Forward/mark the thread roots.
void ConcurrentCopying::FlipThreadRoots() {
  TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
  }
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  gc_barrier_->Init(self, 0);
  ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
  FlipCallback flip_callback(this);
  heap_->ThreadFlipBegin(self);  // Sync with JNI critical calls.
  size_t barrier_count = Runtime::Current()->FlipThreadRoots(
      &thread_flip_visitor, &flip_callback, this);
  heap_->ThreadFlipEnd(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  is_asserting_to_space_invariant_ = true;
  QuasiAtomic::ThreadFenceForConstructor();
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
    LOG(INFO) << "GC end of FlipThreadRoots";
  }
}

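// Swap the heap's allocation stack and live stack; called during the flip pause.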
void ConcurrentCopying::SwapStacks() {
  heap_->SwapStacks();
}

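// Record the size of the live stack at the flip; called under
// kEnableFromSpaceAccountingCheck in the flip callback.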
void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
}

class EmptyCheckpoint : public Closure {
 public:
  explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Used to visit objects in the immune spaces.
inline void ConcurrentCopying::ScanImmuneObject(mirror::Object* obj) {
  DCHECK(obj != nullptr);
  DCHECK(immune_spaces_.ContainsObject(obj));
  // Update the fields without graying the object or pushing it onto the mark stack.
  Scan(obj);
}

class ConcurrentCopying::ImmuneSpaceScanObjVisitor {
 public:
  explicit ImmuneSpaceScanObjVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}

  void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) {
    collector_->ScanImmuneObject(obj);
  }

 private:
  ConcurrentCopying* const collector_;
};

// Concurrently mark roots that are guarded by read barriers and process the mark stack.
void ConcurrentCopying::MarkingPhase() {
  TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC MarkingPhase";
  }
  CHECK(weak_ref_access_enabled_);

  // Scan immune spaces.
  // Update all the fields in the immune spaces first without graying the objects so that we
  // minimize dirty pages in the immune spaces. Note mutators can concurrently access and gray some
  // of the objects.
  if (kUseBakerReadBarrier) {
    gc_grays_immune_objects_ = false;
  }
  for (auto& space : immune_spaces_.GetSpaces()) {
    DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
    accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
    ImmuneSpaceScanObjVisitor visitor(this);
    live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                  reinterpret_cast<uintptr_t>(space->Limit()),
                                  visitor);
  }
  if (kUseBakerReadBarrier) {
    // This release fence makes the field updates in the above loop visible before allowing
    // mutators to access immune objects without graying them first.
    updated_all_immune_objects_.StoreRelease(true);
    // Now whiten immune objects concurrently accessed and grayed by mutators. We can't do this in
    // the above loop because we would incorrectly disable the read barrier by whitening an object
    // which may point to an unscanned, white object, breaking the to-space invariant.
    //
    // Make sure no mutators are in the middle of marking an immune object before whitening immune
    // objects.
    IssueEmptyCheckpoint();
    MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
    if (kVerboseMode) {
      LOG(INFO) << "immune gray stack size=" << immune_gray_stack_.size();
    }
    for (mirror::Object* obj : immune_gray_stack_) {
      DCHECK(obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      bool success = obj->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                      ReadBarrier::WhitePtr());
      DCHECK(success);
    }
    immune_gray_stack_.clear();
  }

  {
    TimingLogger::ScopedTiming split2("VisitConcurrentRoots", GetTimings());
    Runtime::Current()->VisitConcurrentRoots(this, kVisitRootFlagAllRoots);
  }
  {
    // TODO: don't visit the transaction roots if it's not active.
    TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
    Runtime::Current()->VisitNonThreadRoots(this);
  }

  Thread* self = Thread::Current();
  {
    TimingLogger::ScopedTiming split7("ProcessMarkStack", GetTimings());
    // We transition through three mark stack modes (thread-local, shared, GC-exclusive). The
    // primary reasons are that we need a checkpoint to process thread-local mark stacks, but
    // after we disable weak ref accesses we can't use a checkpoint (running threads may be
    // blocking at WaitHoldingLocks, which would deadlock), and that once we reach the point where
    // we process weak references, we can avoid using a lock when accessing the GC mark stack,
    // which makes mark stack processing more efficient.

    // Process the mark stack once in the thread local stack mode. This marks most of the live
    // objects, aside from weak ref accesses with read barriers (Reference::GetReferent() and
    // system weaks) that may happen concurrently while we process the mark stack and that newly
    // mark/gray objects and push refs onto the mark stack.
    ProcessMarkStack();
    // Switch to the shared mark stack mode. That is, revoke and process thread-local mark stacks
    // for the last time before transitioning to the shared mark stack mode, which would process
    // new refs that may have been concurrently pushed onto the mark stack during the
    // ProcessMarkStack() call above. At the same time, disable weak ref accesses using a
    // per-thread flag. It's important to do these together in a single checkpoint so that we can
    // ensure that mutators won't newly gray objects and push new refs onto the mark stack due to
    // weak ref accesses, and that mutators safely transition to the shared mark stack mode
    // (without leaving unprocessed refs on the thread-local mark stacks), without a race. This is
    // why we use a thread-local weak ref access flag Thread::tls32_.weak_ref_access_enabled_
    // instead of the global ones.
    SwitchToSharedMarkStackMode();
    CHECK(!self->GetWeakRefAccessEnabled());
    // Now that weak ref accesses are disabled, once we exhaust the shared mark stack again here
    // (which may be non-empty if there were refs found on thread-local mark stacks during the
    // above SwitchToSharedMarkStackMode() call), we won't have new refs to process, that is,
    // mutators (via read barriers) have no way to produce any more refs to process. Marking
    // converges once before we process weak refs below.
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Switch to the GC exclusive mark stack mode so that we can process the mark stack without a
    // lock from this point on.
    SwitchToGcExclusiveMarkStackMode();
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "ProcessReferences";
    }
    // Process weak references. This may produce new refs to process and have them processed via
    // ProcessMarkStack (in the GC exclusive mark stack mode).
    ProcessReferences(self);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks";
    }
    SweepSystemWeaks(self);
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks done";
    }
    // Process the mark stack here one last time because the above SweepSystemWeaks() call may
    // have marked some objects (strings) alive, as hash_set::Erase() can call the hash function
    // on arbitrary elements in the weak intern table in InternTable::Table::SweepWeaks().
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Re-enable weak ref accesses.
    ReenableWeakRefAccess(self);
    // Free data for class loaders that we unloaded.
    Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
    // Marking is done. Disable marking.
    DisableMarking();
    if (kUseBakerReadBarrier) {
      ProcessFalseGrayStack();
    }
    CheckEmptyMarkStack();
  }

  CHECK(weak_ref_access_enabled_);
  if (kVerboseMode) {
    LOG(INFO) << "GC end of MarkingPhase";
  }
}

void ConcurrentCopying::ReenableWeakRefAccess(Thread* self) {
  if (kVerboseMode) {
    LOG(INFO) << "ReenableWeakRefAccess";
  }
  weak_ref_access_enabled_.StoreRelaxed(true);  // This is for new threads.
  QuasiAtomic::ThreadFenceForConstructor();
  // Iterate all threads (don't need to or can't use a checkpoint) and re-enable weak ref access.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      thread->SetWeakRefAccessEnabled(true);
    }
  }
  // Unblock blocking threads.
  GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
  Runtime::Current()->BroadcastForNewSystemWeaks();
}

class ConcurrentCopying::DisableMarkingCheckpoint : public Closure {
 public:
  explicit DisableMarkingCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // Disable the thread-local is_gc_marking flag.
    // Note that a thread that has just started right before this checkpoint may already have this
    // flag set to false, which is OK.
    thread->SetIsGcMarking(false);
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

void ConcurrentCopying::IssueDisableMarkingCheckpoint() {
  Thread* self = Thread::Current();
  DisableMarkingCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions are
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

void ConcurrentCopying::DisableMarking() {
  // Change the global is_marking flag to false. Do a fence before doing a checkpoint to update the
  // thread-local flags so that a new thread starting up will get the correct is_marking flag.
  is_marking_ = false;
  QuasiAtomic::ThreadFenceForConstructor();
  // Use a checkpoint to turn off the thread-local is_gc_marking flags and to ensure no threads are
  // still in the middle of a read barrier which may have a from-space ref cached in a local
  // variable.
  IssueDisableMarkingCheckpoint();
  if (kUseTableLookupReadBarrier) {
    heap_->rb_table_->ClearAll();
    DCHECK(heap_->rb_table_->IsAllCleared());
  }
  is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(1);
  mark_stack_mode_.StoreSequentiallyConsistent(kMarkStackModeOff);
}

void ConcurrentCopying::PushOntoFalseGrayStack(mirror::Object* ref) {
  CHECK(kUseBakerReadBarrier);
  DCHECK(ref != nullptr);
  MutexLock mu(Thread::Current(), mark_stack_lock_);
  false_gray_stack_.push_back(ref);
}

void ConcurrentCopying::ProcessFalseGrayStack() {
  CHECK(kUseBakerReadBarrier);
  // Change the objects on the false gray stack from gray to white.
  MutexLock mu(Thread::Current(), mark_stack_lock_);
  for (mirror::Object* obj : false_gray_stack_) {
    DCHECK(IsMarked(obj));
    // The object could be white here if a thread got preempted after a successful
    // AtomicSetReadBarrierPointer in Mark(), the GC started marking through it (but had not
    // finished, so it was still gray), and the thread ran to register it onto the false gray
    // stack.
    if (obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
      bool success = obj->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                      ReadBarrier::WhitePtr());
      DCHECK(success);
    }
  }
  false_gray_stack_.clear();
}

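// Run an empty checkpoint through all mutator threads and wait until each has passed it; used
// to ensure that no mutator is still in the middle of an operation the GC must not race with.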
void ConcurrentCopying::IssueEmptyCheckpoint() {
  Thread* self = Thread::Current();
  EmptyCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions are
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

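// Double the capacity of the full GC mark stack, preserving its contents.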
void ConcurrentCopying::ExpandGcMarkStack() {
  DCHECK(gc_mark_stack_->IsFull());
  const size_t new_size = gc_mark_stack_->Capacity() * 2;
  std::vector<StackReference<mirror::Object>> temp(gc_mark_stack_->Begin(),
                                                   gc_mark_stack_->End());
  gc_mark_stack_->Resize(new_size);
  for (auto& ref : temp) {
    gc_mark_stack_->PushBack(ref.AsMirrorPtr());
  }
  DCHECK(!gc_mark_stack_->IsFull());
}

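// Push a ref onto a mark stack according to the current mark stack mode: in the thread-local
// mode, the GC thread pushes onto the GC mark stack and mutators push onto thread-local mark
// stacks; in the shared mode, all pushes go to the shared GC mark stack under mark_stack_lock_;
// in the GC-exclusive mode, only the GC thread may push, with no lock needed.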
void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
  CHECK_EQ(is_mark_stack_push_disallowed_.LoadRelaxed(), 0)
      << " " << to_ref << " " << PrettyTypeOf(to_ref);
  Thread* self = Thread::Current();  // TODO: pass self as an argument from call sites?
  CHECK(thread_running_gc_ != nullptr);
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (LIKELY(mark_stack_mode == kMarkStackModeThreadLocal)) {
    if (LIKELY(self == thread_running_gc_)) {
      // If GC-running thread, use the GC mark stack instead of a thread-local mark stack.
      CHECK(self->GetThreadLocalMarkStack() == nullptr);
      if (UNLIKELY(gc_mark_stack_->IsFull())) {
        ExpandGcMarkStack();
      }
      gc_mark_stack_->PushBack(to_ref);
    } else {
      // Otherwise, use a thread-local mark stack.
      accounting::AtomicStack<mirror::Object>* tl_mark_stack = self->GetThreadLocalMarkStack();
      if (UNLIKELY(tl_mark_stack == nullptr || tl_mark_stack->IsFull())) {
        MutexLock mu(self, mark_stack_lock_);
        // Get a new thread local mark stack.
        accounting::AtomicStack<mirror::Object>* new_tl_mark_stack;
        if (!pooled_mark_stacks_.empty()) {
          // Use a pooled mark stack.
          new_tl_mark_stack = pooled_mark_stacks_.back();
          pooled_mark_stacks_.pop_back();
        } else {
          // None pooled. Create a new one.
          new_tl_mark_stack =
              accounting::AtomicStack<mirror::Object>::Create(
                  "thread local mark stack", 4 * KB, 4 * KB);
        }
        DCHECK(new_tl_mark_stack != nullptr);
        DCHECK(new_tl_mark_stack->IsEmpty());
        new_tl_mark_stack->PushBack(to_ref);
        self->SetThreadLocalMarkStack(new_tl_mark_stack);
        if (tl_mark_stack != nullptr) {
          // Store the old full stack into a vector.
          revoked_mark_stacks_.push_back(tl_mark_stack);
        }
      } else {
        tl_mark_stack->PushBack(to_ref);
      }
    }
  } else if (mark_stack_mode == kMarkStackModeShared) {
    // Access the shared GC mark stack with a lock.
    MutexLock mu(self, mark_stack_lock_);
    if (UNLIKELY(gc_mark_stack_->IsFull())) {
      ExpandGcMarkStack();
    }
    gc_mark_stack_->PushBack(to_ref);
  } else {
    CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
             static_cast<uint32_t>(kMarkStackModeGcExclusive))
        << "ref=" << to_ref
        << " self->gc_marking=" << self->GetIsGcMarking()
        << " cc->is_marking=" << is_marking_;
    CHECK(self == thread_running_gc_)
        << "Only GC-running thread should access the mark stack "
        << "in the GC exclusive mark stack mode";
    // Access the GC mark stack without a lock.
    if (UNLIKELY(gc_mark_stack_->IsFull())) {
      ExpandGcMarkStack();
    }
    gc_mark_stack_->PushBack(to_ref);
  }
}

accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() {
  return heap_->allocation_stack_.get();
}

accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
  return heap_->live_stack_.get();
}

// The following visitors are used to verify that there are no references to the from-space left
// after marking.
class ConcurrentCopying::VerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
 public:
  explicit VerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    if (kUseBakerReadBarrier) {
      CHECK(ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr())
          << "Ref " << ref << " " << PrettyTypeOf(ref)
          << " has non-white rb_ptr " << ref->GetReadBarrierPointer();
    }
  }

  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    DCHECK(root != nullptr);
    operator()(root);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopying::VerifyNoFromSpaceRefsFieldVisitor {
 public:
  explicit VerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    VerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    this->operator()(ref, mirror::Reference::ReferentOffset(), false);
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    VerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(root->AsMirrorPtr());
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopying::VerifyNoFromSpaceRefsObjectVisitor {
 public:
  explicit VerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void *arg)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    VerifyNoFromSpaceRefsFieldVisitor visitor(collector);
    obj->VisitReferences(visitor, visitor);
    if (kUseBakerReadBarrier) {
      CHECK(obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr())
          << "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer();
    }
  }

 private:
  ConcurrentCopying* const collector_;
};

// Verify there are no from-space references left after the marking phase.
void ConcurrentCopying::VerifyNoFromSpaceReferences() {
  Thread* self = Thread::Current();
  DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
  // Verify that all threads have is_gc_marking set to false.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      CHECK(!thread->GetIsGcMarking());
    }
  }
  VerifyNoFromSpaceRefsObjectVisitor visitor(this);
  // Roots.
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    VerifyNoFromSpaceRefsVisitor ref_visitor(this);
    Runtime::Current()->VisitRoots(&ref_visitor);
  }
  // The to-space.
  region_space_->WalkToSpace(VerifyNoFromSpaceRefsObjectVisitor::ObjectCallback, this);
  // Non-moving spaces.
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->GetMarkBitmap()->Visit(visitor);
  }
  // The alloc stack.
  {
    VerifyNoFromSpaceRefsVisitor ref_visitor(this);
    for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
        it < end; ++it) {
      mirror::Object* const obj = it->AsMirrorPtr();
      if (obj != nullptr && obj->GetClass() != nullptr) {
        // TODO: need to call this only if obj is alive?
        ref_visitor(obj);
        visitor(obj);
      }
    }
  }
  // TODO: LOS. But only refs in LOS are classes.
}

// The following visitors are used to assert the to-space invariant.
class ConcurrentCopying::AssertToSpaceInvariantRefsVisitor {
 public:
  explicit AssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopying::AssertToSpaceInvariantFieldVisitor {
 public:
  explicit AssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    AssertToSpaceInvariantRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* ref ATTRIBUTE_UNUSED) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    AssertToSpaceInvariantRefsVisitor visitor(collector_);
    visitor(root->AsMirrorPtr());
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopying::AssertToSpaceInvariantObjectVisitor {
 public:
  explicit AssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void *arg)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
    AssertToSpaceInvariantFieldVisitor visitor(collector);
    obj->VisitReferences(visitor, visitor);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
 public:
  RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
                                       bool disable_weak_ref_access)
      : concurrent_copying_(concurrent_copying),
        disable_weak_ref_access_(disable_weak_ref_access) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // Revoke thread local mark stacks.
    accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
    if (tl_mark_stack != nullptr) {
      MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
      concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
      thread->SetThreadLocalMarkStack(nullptr);
    }
    // Disable weak ref access.
    if (disable_weak_ref_access_) {
      thread->SetWeakRefAccessEnabled(false);
    }
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool disable_weak_ref_access_;
};

void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access) {
  Thread* self = Thread::Current();
  RevokeThreadLocalMarkStackCheckpoint check_point(this, disable_weak_ref_access);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions are
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

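// Revoke the calling thread's own thread-local mark stack without a checkpoint.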
void ConcurrentCopying::RevokeThreadLocalMarkStack(Thread* thread) {
  Thread* self = Thread::Current();
  CHECK_EQ(self, thread);
  accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
  if (tl_mark_stack != nullptr) {
    CHECK(is_marking_);
    MutexLock mu(self, mark_stack_lock_);
    revoked_mark_stacks_.push_back(tl_mark_stack);
    thread->SetThreadLocalMarkStack(nullptr);
  }
}

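// Drain the mark stacks until they are seen empty twice in a row; a single empty observation is
// not sufficient since processing a ref can push new refs.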
void ConcurrentCopying::ProcessMarkStack() {
  if (kVerboseMode) {
    LOG(INFO) << "ProcessMarkStack. ";
  }
  bool empty_prev = false;
  while (true) {
    bool empty = ProcessMarkStackOnce();
    if (empty_prev && empty) {
      // Saw empty mark stack for a second time, done.
      break;
    }
    empty_prev = empty;
  }
}

bool ConcurrentCopying::ProcessMarkStackOnce() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK(self == thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  size_t count = 0;
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (mark_stack_mode == kMarkStackModeThreadLocal) {
    // Process the thread-local mark stacks and the GC mark stack.
    count += ProcessThreadLocalMarkStacks(false);
    while (!gc_mark_stack_->IsEmpty()) {
      mirror::Object* to_ref = gc_mark_stack_->PopBack();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    gc_mark_stack_->Reset();
  } else if (mark_stack_mode == kMarkStackModeShared) {
    // Process the shared GC mark stack with a lock.
    {
      MutexLock mu(self, mark_stack_lock_);
      CHECK(revoked_mark_stacks_.empty());
    }
    while (true) {
      std::vector<mirror::Object*> refs;
      {
        // Copy refs with lock. Note the number of refs should be small.
        MutexLock mu(self, mark_stack_lock_);
        if (gc_mark_stack_->IsEmpty()) {
          break;
        }
        for (StackReference<mirror::Object>* p = gc_mark_stack_->Begin();
            p != gc_mark_stack_->End(); ++p) {
          refs.push_back(p->AsMirrorPtr());
        }
        gc_mark_stack_->Reset();
      }
      for (mirror::Object* ref : refs) {
        ProcessMarkStackRef(ref);
        ++count;
      }
    }
  } else {
    CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
             static_cast<uint32_t>(kMarkStackModeGcExclusive));
    {
      MutexLock mu(self, mark_stack_lock_);
      CHECK(revoked_mark_stacks_.empty());
    }
    // Process the GC mark stack in the exclusive mode. No need to take the lock.
    while (!gc_mark_stack_->IsEmpty()) {
      mirror::Object* to_ref = gc_mark_stack_->PopBack();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    gc_mark_stack_->Reset();
  }

  // Return true if the stack was empty.
  return count == 0;
}

size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access) {
  // Run a checkpoint to collect all thread local mark stacks and iterate over them all.
  RevokeThreadLocalMarkStacks(disable_weak_ref_access);
  size_t count = 0;
  std::vector<accounting::AtomicStack<mirror::Object>*> mark_stacks;
  {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    // Make a copy of the mark stack vector.
    mark_stacks = revoked_mark_stacks_;
    revoked_mark_stacks_.clear();
  }
  for (accounting::AtomicStack<mirror::Object>* mark_stack : mark_stacks) {
    for (StackReference<mirror::Object>* p = mark_stack->Begin(); p != mark_stack->End(); ++p) {
      mirror::Object* to_ref = p->AsMirrorPtr();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    {
      MutexLock mu(Thread::Current(), mark_stack_lock_);
      if (pooled_mark_stacks_.size() >= kMarkStackPoolSize) {
        // The pool has enough. Delete it.
        delete mark_stack;
      } else {
        // Otherwise, put it into the pool for later reuse.
        mark_stack->Reset();
        pooled_mark_stacks_.push_back(mark_stack);
      }
    }
  }
  return count;
}

inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
  DCHECK(!region_space_->IsInFromSpace(to_ref));
  if (kUseBakerReadBarrier) {
    DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
        << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
        << " is_marked=" << IsMarked(to_ref);
  }
  // Scan ref fields.
  Scan(to_ref);
  if (kUseBakerReadBarrier) {
    DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
        << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
        << " is_marked=" << IsMarked(to_ref);
  }
#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
  if (UNLIKELY((to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
                to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr &&
                !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())))) {
    // Leave this reference gray in the queue so that GetReferent() will trigger a read barrier. We
    // will change it to white later in ReferenceQueue::DequeuePendingReference().
    DCHECK(to_ref->AsReference()->GetPendingNext() != nullptr)
        << "Left unenqueued ref gray " << to_ref;
  } else {
    // We may occasionally leave a reference white in the queue if its referent happens to be
    // concurrently marked after the Scan() call above has enqueued the Reference, in which case the
    // above IsInToSpace() evaluates to true and we change the color from gray to white here in this
    // else block.
    if (kUseBakerReadBarrier) {
      bool success = to_ref->AtomicSetReadBarrierPointer</*kCasRelease*/true>(
          ReadBarrier::GrayPtr(),
          ReadBarrier::WhitePtr());
      DCHECK(success) << "Must succeed as we won the race.";
    }
  }
#else
  DCHECK(!kUseBakerReadBarrier);
#endif

  if (region_space_->IsInUnevacFromSpace(to_ref)) {
    // Add to the live bytes per unevacuated from space. Note this code is always run by the
    // GC-running thread (no synchronization required).
    DCHECK(region_space_bitmap_->Test(to_ref));
    // Disable the read barrier in SizeOf for performance, which is safe.
    size_t obj_size = to_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
    size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
    region_space_->AddLiveBytes(to_ref, alloc_size);
  }
  if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
    AssertToSpaceInvariantObjectVisitor visitor(this);
    visitor(to_ref);
  }
}

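// Mark stack modes transition in one direction per GC cycle:
// thread-local -> shared -> GC-exclusive -> off.
// The switch to shared mode also disables weak ref access, so mutators cannot read through
// weak references (and potentially resurrect unmarked objects) until marking completes; the
// thread-local stacks are drained one final time after the switch.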
void ConcurrentCopying::SwitchToSharedMarkStackMode() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK_EQ(self, thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
           static_cast<uint32_t>(kMarkStackModeThreadLocal));
  mark_stack_mode_.StoreRelaxed(kMarkStackModeShared);
  CHECK(weak_ref_access_enabled_.LoadRelaxed());
  weak_ref_access_enabled_.StoreRelaxed(false);
  QuasiAtomic::ThreadFenceForConstructor();
  // Process the thread-local mark stacks one last time after switching to the shared mark stack
  // mode and disabling weak ref accesses.
  ProcessThreadLocalMarkStacks(true);
  if (kVerboseMode) {
    LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access";
  }
}

void ConcurrentCopying::SwitchToGcExclusiveMarkStackMode() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK_EQ(self, thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
           static_cast<uint32_t>(kMarkStackModeShared));
  mark_stack_mode_.StoreRelaxed(kMarkStackModeGcExclusive);
  QuasiAtomic::ThreadFenceForConstructor();
  if (kVerboseMode) {
    LOG(INFO) << "Switched to GC exclusive mark stack mode";
  }
}

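// Verify that no mark work remains. In thread-local mode this revokes all thread-local stacks
// first; any leftover entry is dumped (with its read barrier state) before aborting.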
void ConcurrentCopying::CheckEmptyMarkStack() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK_EQ(self, thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (mark_stack_mode == kMarkStackModeThreadLocal) {
    // Thread-local mark stack mode.
    RevokeThreadLocalMarkStacks(false);
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (!revoked_mark_stacks_.empty()) {
      for (accounting::AtomicStack<mirror::Object>* mark_stack : revoked_mark_stacks_) {
        while (!mark_stack->IsEmpty()) {
          mirror::Object* obj = mark_stack->PopBack();
          if (kUseBakerReadBarrier) {
            mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
            LOG(INFO) << "On mark queue: " << obj << " " << PrettyTypeOf(obj)
                      << " rb_ptr=" << rb_ptr << " is_marked=" << IsMarked(obj);
          } else {
            LOG(INFO) << "On mark queue: " << obj << " " << PrettyTypeOf(obj)
                      << " is_marked=" << IsMarked(obj);
          }
        }
      }
      LOG(FATAL) << "mark stack is not empty";
    }
  } else {
    // Shared, GC-exclusive, or off.
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    CHECK(gc_mark_stack_->IsEmpty());
    CHECK(revoked_mark_stacks_.empty());
  }
}

void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
  TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  Runtime::Current()->SweepSystemWeaks(this);
}

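// Sweep unmarked objects in the non-region spaces. Objects still on the allocation stack were
// allocated during the GC and are implicitly live, so the alloc stack is marked as live first;
// the region space and the immune spaces are skipped since they are not swept here.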
void ConcurrentCopying::Sweep(bool swap_bitmaps) {
  {
    TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    if (kEnableFromSpaceAccountingCheck) {
      CHECK_GE(live_stack_freeze_size_, live_stack->Size());
    }
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
  }
  CheckEmptyMarkStack();
  TimingLogger::ScopedTiming split("Sweep", GetTimings());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (space == region_space_ || immune_spaces_.ContainsSpace(space)) {
        continue;
      }
      TimingLogger::ScopedTiming split2(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
      RecordFree(alloc_space->Sweep(swap_bitmaps));
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) {
  TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
  RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
}

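// Reclaim phase: verify that the mark stacks are quiescent, compute the freed byte/object
// counts from the from-space allocation accounting versus the moved-object counters, clear the
// from-space regions, and sweep the mark/live bitmaps of the non-moving spaces.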
void ConcurrentCopying::ReclaimPhase() {
  TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC ReclaimPhase";
  }
  Thread* self = Thread::Current();

  {
    // Double-check that the mark stack is empty.
    // Note: need to set this after VerifyNoFromSpaceRef().
    is_asserting_to_space_invariant_ = false;
    QuasiAtomic::ThreadFenceForConstructor();
    if (kVerboseMode) {
      LOG(INFO) << "Issue an empty checkpoint.";
    }
    IssueEmptyCheckpoint();
    // Disable the check.
    is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(0);
    if (kUseBakerReadBarrier) {
      updated_all_immune_objects_.StoreSequentiallyConsistent(false);
    }
    CheckEmptyMarkStack();
  }

  {
    // Record freed objects.
    TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
    // Don't include thread-locals that are in the to-space.
    uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
    uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
    uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
    uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
    uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
    uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
    if (kEnableFromSpaceAccountingCheck) {
      CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
      CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
    }
    CHECK_LE(to_objects, from_objects);
    CHECK_LE(to_bytes, from_bytes);
    int64_t freed_bytes = from_bytes - to_bytes;
    int64_t freed_objects = from_objects - to_objects;
    if (kVerboseMode) {
      LOG(INFO) << "RecordFree:"
                << " from_bytes=" << from_bytes << " from_objects=" << from_objects
                << " unevac_from_bytes=" << unevac_from_bytes
                << " unevac_from_objects=" << unevac_from_objects
                << " to_bytes=" << to_bytes << " to_objects=" << to_objects
                << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects
                << " from_space size=" << region_space_->FromSpaceSize()
                << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
                << " to_space size=" << region_space_->ToSpaceSize();
      LOG(INFO) << "(before) num_bytes_allocated="
                << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
    }
    RecordFree(ObjectBytePair(freed_objects, freed_bytes));
    if (kVerboseMode) {
      LOG(INFO) << "(after) num_bytes_allocated="
                << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
    }
  }

  {
    TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
    region_space_->ClearFromSpace();
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    Sweep(false);
    SwapBitmaps();
    heap_->UnBindBitmaps();

    // Delete the region bitmap.
    DCHECK(region_space_bitmap_ != nullptr);
    delete region_space_bitmap_;
    region_space_bitmap_ = nullptr;
  }

  CheckEmptyMarkStack();

  if (kVerboseMode) {
    LOG(INFO) << "GC end of ReclaimPhase";
  }
}

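// The to-space invariant: while marking is active, every reference a mutator can observe must
// point to a to-space object, a live (marked) unevac from-space object, or a marked/immune
// object in a non-moving space; a raw from-space reference is a violation. The asserts below
// enforce this in debug/verification builds and dump extra state on failure.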
// Assert the to-space invariant.
void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                               mirror::Object* ref) {
  CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
  if (is_asserting_to_space_invariant_) {
    if (region_space_->IsInToSpace(ref)) {
      // OK.
      return;
    } else if (region_space_->IsInUnevacFromSpace(ref)) {
      CHECK(region_space_bitmap_->Test(ref)) << ref;
    } else if (region_space_->IsInFromSpace(ref)) {
      // Not OK. Do extra logging.
      if (obj != nullptr) {
        LogFromSpaceRefHolder(obj, offset);
      }
      ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
      CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
    } else {
      AssertToSpaceInvariantInNonMovingSpace(obj, ref);
    }
  }
}

class RootPrinter {
 public:
  RootPrinter() { }

  template <class MirrorType>
  ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  template <class MirrorType>
  void VisitRoot(mirror::Object** root)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << *root;
  }

  template <class MirrorType>
  void VisitRoot(mirror::CompressedReference<MirrorType>* root)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << root->AsMirrorPtr();
  }
};

void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source,
                                               mirror::Object* ref) {
  CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
  if (is_asserting_to_space_invariant_) {
    if (region_space_->IsInToSpace(ref)) {
      // OK.
      return;
    } else if (region_space_->IsInUnevacFromSpace(ref)) {
      CHECK(region_space_bitmap_->Test(ref)) << ref;
    } else if (region_space_->IsInFromSpace(ref)) {
      // Not OK. Do extra logging.
      if (gc_root_source == nullptr) {
        // No info.
      } else if (gc_root_source->HasArtField()) {
        ArtField* field = gc_root_source->GetArtField();
        LOG(INTERNAL_FATAL) << "gc root in field " << field << " " << PrettyField(field);
        RootPrinter root_printer;
        field->VisitRoots(root_printer);
      } else if (gc_root_source->HasArtMethod()) {
        ArtMethod* method = gc_root_source->GetArtMethod();
        LOG(INTERNAL_FATAL) << "gc root in method " << method << " " << PrettyMethod(method);
        RootPrinter root_printer;
        method->VisitRoots(root_printer, sizeof(void*));
      }
      ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
      region_space_->DumpNonFreeRegions(LOG(INTERNAL_FATAL));
      PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
      MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
      CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
    } else {
      AssertToSpaceInvariantInNonMovingSpace(nullptr, ref);
    }
  }
}

void ConcurrentCopying::LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) {
  if (kUseBakerReadBarrier) {
    LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj)
              << " holder rb_ptr=" << obj->GetReadBarrierPointer();
  } else {
    LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj);
  }
  if (region_space_->IsInFromSpace(obj)) {
    LOG(INFO) << "holder is in the from-space.";
  } else if (region_space_->IsInToSpace(obj)) {
    LOG(INFO) << "holder is in the to-space.";
  } else if (region_space_->IsInUnevacFromSpace(obj)) {
    LOG(INFO) << "holder is in the unevac from-space.";
    if (region_space_bitmap_->Test(obj)) {
      LOG(INFO) << "holder is marked in the region space bitmap.";
    } else {
      LOG(INFO) << "holder is not marked in the region space bitmap.";
    }
  } else {
    // In a non-moving space.
    if (immune_spaces_.ContainsObject(obj)) {
      LOG(INFO) << "holder is in an immune image or the zygote space.";
    } else {
      LOG(INFO) << "holder is in a non-immune, non-moving (or main) space.";
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
      accounting::LargeObjectBitmap* los_bitmap =
          heap_mark_bitmap_->GetLargeObjectBitmap(obj);
      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
      bool is_los = mark_bitmap == nullptr;
      if (!is_los && mark_bitmap->Test(obj)) {
        LOG(INFO) << "holder is marked in the mark bit map.";
      } else if (is_los && los_bitmap->Test(obj)) {
        LOG(INFO) << "holder is marked in the los bit map.";
      } else {
        // If ref is on the allocation stack, then it is considered
        // marked/alive (but not necessarily on the live stack.)
        if (IsOnAllocStack(obj)) {
          LOG(INFO) << "holder is on the alloc stack.";
        } else {
          LOG(INFO) << "holder is not marked or on the alloc stack.";
        }
      }
    }
  }
  LOG(INFO) << "offset=" << offset.SizeValue();
}

void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj,
                                                               mirror::Object* ref) {
  // In a non-moving space. Check that the ref is marked.
  if (immune_spaces_.ContainsObject(ref)) {
    if (kUseBakerReadBarrier) {
      // Immune object may not be gray if called from the GC.
      if (Thread::Current() == thread_running_gc_ && !gc_grays_immune_objects_) {
        return;
      }
      bool updated_all_immune_objects = updated_all_immune_objects_.LoadSequentiallyConsistent();
      CHECK(updated_all_immune_objects || ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
          << "Unmarked immune space ref. obj=" << obj << " rb_ptr="
          << (obj != nullptr ? obj->GetReadBarrierPointer() : nullptr)
          << " ref=" << ref << " ref rb_ptr=" << ref->GetReadBarrierPointer()
          << " updated_all_immune_objects=" << updated_all_immune_objects;
    }
  } else {
    accounting::ContinuousSpaceBitmap* mark_bitmap =
        heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
    accounting::LargeObjectBitmap* los_bitmap =
        heap_mark_bitmap_->GetLargeObjectBitmap(ref);
    CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
    bool is_los = mark_bitmap == nullptr;
    if ((!is_los && mark_bitmap->Test(ref)) ||
        (is_los && los_bitmap->Test(ref))) {
      // OK.
    } else {
      // If ref is on the allocation stack, then it may not be
      // marked live, but considered marked/alive (but not
      // necessarily on the live stack).
      CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. "
                                 << "obj=" << obj << " ref=" << ref;
    }
  }
}

// Used to scan ref fields of an object.
class ConcurrentCopying::RefFieldsVisitor {
 public:
  explicit RefFieldsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
      const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_)
      SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
    collector_->Process(obj, offset);
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    collector_->DelayReferenceReferent(klass, ref);
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      ALWAYS_INLINE
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      ALWAYS_INLINE
      SHARED_REQUIRES(Locks::mutator_lock_) {
    collector_->MarkRoot</*kGrayImmuneObject*/false>(root);
  }

 private:
  ConcurrentCopying* const collector_;
};

// Scan ref fields of an object.
inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
  DCHECK(!region_space_->IsInFromSpace(to_ref));
  DCHECK_EQ(Thread::Current(), thread_running_gc_);
  RefFieldsVisitor visitor(this);
  // Disable the read barrier for a performance reason.
  to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
      visitor, visitor);
}

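// Field updates race with mutators, so an optimistic scheme is used below: re-read the field,
// and only CAS in the forwarded reference while the field still holds the value that was read.
// If a mutator stores a new reference first, the store wins and the loop exits; that value is
// already a to-space reference since mutator reads go through the read barrier.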
// Process a field.
inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
  DCHECK_EQ(Thread::Current(), thread_running_gc_);
  mirror::Object* ref = obj->GetFieldObject<
      mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
  mirror::Object* to_ref = Mark</*kGrayImmuneObject*/false>(ref);
  if (to_ref == ref) {
    return;
  }
  // This may fail if the mutator writes to the field at the same time. But it's ok.
  mirror::Object* expected_ref = ref;
  mirror::Object* new_ref = to_ref;
  do {
    if (expected_ref !=
        obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) {
      // It was updated by the mutator.
      break;
    }
  } while (!obj->CasFieldWeakRelaxedObjectWithoutWriteBarrier<
      false, false, kVerifyNone>(offset, expected_ref, new_ref));
}

// Process some roots.
inline void ConcurrentCopying::VisitRoots(
    mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    mirror::Object** root = roots[i];
    mirror::Object* ref = *root;
    mirror::Object* to_ref = Mark(ref);
    if (to_ref == ref) {
      continue;
    }
    Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
    mirror::Object* expected_ref = ref;
    mirror::Object* new_ref = to_ref;
    do {
      if (expected_ref != addr->LoadRelaxed()) {
        // It was updated by the mutator.
        break;
      }
    } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
  }
}

template<bool kGrayImmuneObject>
inline void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) {
  DCHECK(!root->IsNull());
  mirror::Object* const ref = root->AsMirrorPtr();
  mirror::Object* to_ref = Mark<kGrayImmuneObject>(ref);
  if (to_ref != ref) {
    auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
    auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
    auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
    // If the cas fails, then it was updated by the mutator.
    do {
      if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
        // It was updated by the mutator.
        break;
      }
    } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
  }
}

inline void ConcurrentCopying::VisitRoots(
    mirror::CompressedReference<mirror::Object>** roots, size_t count,
    const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    mirror::CompressedReference<mirror::Object>* const root = roots[i];
    if (!root->IsNull()) {
      // kGrayImmuneObject is true because this is used for the thread flip.
      MarkRoot</*kGrayImmuneObject*/true>(root);
    }
  }
}

// Temporarily set gc_grays_immune_objects_ to true in a scope if the current thread is the GC.
class ConcurrentCopying::ScopedGcGraysImmuneObjects {
 public:
  explicit ScopedGcGraysImmuneObjects(ConcurrentCopying* collector)
      : collector_(collector), enabled_(false) {
    if (kUseBakerReadBarrier &&
        collector_->thread_running_gc_ == Thread::Current() &&
        !collector_->gc_grays_immune_objects_) {
      collector_->gc_grays_immune_objects_ = true;
      enabled_ = true;
    }
  }

  ~ScopedGcGraysImmuneObjects() {
    if (kUseBakerReadBarrier &&
        collector_->thread_running_gc_ == Thread::Current() &&
        enabled_) {
      DCHECK(collector_->gc_grays_immune_objects_);
      collector_->gc_grays_immune_objects_ = false;
    }
  }

 private:
  ConcurrentCopying* const collector_;
  bool enabled_;
};

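// When a to-space copy loses the forwarding-pointer race, its memory must still parse as a
// valid object so that bitmap and space walkers do not trip over it. An int array of exactly
// the right length (or java.lang.Object, for blocks too small to hold an array header) is
// stamped over the block.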
// Fill the given memory block with a dummy object. Used to fill in a copy of an object that
// was lost in a race.
void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
  // GC doesn't gray immune objects while scanning immune objects. But we need to trigger the read
  // barriers here because we need the updated reference to the int array class, etc. Temporarily
  // set gc_grays_immune_objects_ to true so that we won't cause a DCHECK failure in
  // MarkImmuneSpace().
  ScopedGcGraysImmuneObjects scoped_gc_gray_immune_objects(this);
  CHECK_ALIGNED(byte_size, kObjectAlignment);
  memset(dummy_obj, 0, byte_size);
  mirror::Class* int_array_class = mirror::IntArray::GetArrayClass();
  CHECK(int_array_class != nullptr);
  AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
  size_t component_size = int_array_class->GetComponentSize();
  CHECK_EQ(component_size, sizeof(int32_t));
  size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
  if (data_offset > byte_size) {
    // An int array is too big. Use java.lang.Object.
    mirror::Class* java_lang_Object = WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object);
    AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object);
    CHECK_EQ(byte_size, java_lang_Object->GetObjectSize());
    dummy_obj->SetClass(java_lang_Object);
    CHECK_EQ(byte_size, dummy_obj->SizeOf());
  } else {
    // Use an int array.
    dummy_obj->SetClass(int_array_class);
    CHECK(dummy_obj->IsArrayInstance());
    int32_t length = (byte_size - data_offset) / component_size;
    dummy_obj->AsArray()->SetLength(length);
    CHECK_EQ(dummy_obj->AsArray()->GetLength(), length)
        << "byte_size=" << byte_size << " length=" << length
        << " component_size=" << component_size << " data_offset=" << data_offset;
    CHECK_EQ(byte_size, dummy_obj->SizeOf())
        << "byte_size=" << byte_size << " length=" << length
        << " component_size=" << component_size << " data_offset=" << data_offset;
  }
}

// Reuse the memory blocks that were copies of objects lost in races.
mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
  // Try to reuse the blocks that were unused due to CAS failures.
  CHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment);
  Thread* self = Thread::Current();
  size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
  MutexLock mu(self, skipped_blocks_lock_);
  auto it = skipped_blocks_map_.lower_bound(alloc_size);
  if (it == skipped_blocks_map_.end()) {
    // Not found.
    return nullptr;
  }
  {
    size_t byte_size = it->first;
    CHECK_GE(byte_size, alloc_size);
    if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
      // If remainder would be too small for a dummy object, retry with a larger request size.
      it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
      if (it == skipped_blocks_map_.end()) {
        // Not found.
        return nullptr;
      }
      CHECK_ALIGNED(it->first - alloc_size, space::RegionSpace::kAlignment);
      CHECK_GE(it->first - alloc_size, min_object_size)
          << "byte_size=" << byte_size << " it->first=" << it->first
          << " alloc_size=" << alloc_size;
    }
  }
  // Found a block.
  CHECK(it != skipped_blocks_map_.end());
  size_t byte_size = it->first;
  uint8_t* addr = it->second;
  CHECK_GE(byte_size, alloc_size);
  CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
  CHECK_ALIGNED(byte_size, space::RegionSpace::kAlignment);
  if (kVerboseMode) {
    LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
  }
  skipped_blocks_map_.erase(it);
  memset(addr, 0, byte_size);
  if (byte_size > alloc_size) {
    // Return the remainder to the map.
    CHECK_ALIGNED(byte_size - alloc_size, space::RegionSpace::kAlignment);
    CHECK_GE(byte_size - alloc_size, min_object_size);
    FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
                        byte_size - alloc_size);
    CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
    skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
  }
  return reinterpret_cast<mirror::Object*>(addr);
}

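// Copy from_ref into to-space and race to install the forwarding pointer in from_ref's lock
// word. Exactly one thread wins the CAS; losers reuse or free their speculative copy and adopt
// the winner's to_ref. Allocation falls back from the region space to skipped blocks and then
// to the non-moving space.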
mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  // No read barrier to avoid nested RB that might violate the to-space
  // invariant. Note that from_ref is a from space ref so the SizeOf()
  // call will access the from-space meta objects, but it's ok and necessary.
  size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
  size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
  size_t region_space_bytes_allocated = 0U;
  size_t non_moving_space_bytes_allocated = 0U;
  size_t bytes_allocated = 0U;
  size_t dummy;
  mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
      region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
  bytes_allocated = region_space_bytes_allocated;
  if (to_ref != nullptr) {
    DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
  }
  bool fall_back_to_non_moving = false;
  if (UNLIKELY(to_ref == nullptr)) {
    // Failed to allocate in the region space. Try the skipped blocks.
    to_ref = AllocateInSkippedBlock(region_space_alloc_size);
    if (to_ref != nullptr) {
      // Succeeded in allocating in a skipped block.
      if (heap_->use_tlab_) {
        // This is necessary for the tlab case as it's not accounted in the space.
        region_space_->RecordAlloc(to_ref);
      }
      bytes_allocated = region_space_alloc_size;
    } else {
      // Fall back to the non-moving space.
      fall_back_to_non_moving = true;
      if (kVerboseMode) {
        LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
                  << to_space_bytes_skipped_.LoadSequentiallyConsistent()
                  << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent();
      }
      to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
                                               &non_moving_space_bytes_allocated, nullptr, &dummy);
      CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed";
      bytes_allocated = non_moving_space_bytes_allocated;
      // Mark it in the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
      CHECK(mark_bitmap != nullptr);
      CHECK(!mark_bitmap->AtomicTestAndSet(to_ref));
    }
  }
  DCHECK(to_ref != nullptr);

  // Attempt to install the forward pointer. This is in a loop as the
  // lock word atomic write can fail.
  while (true) {
    // Copy the object. TODO: copy only the lockword in the second iteration and on?
    memcpy(to_ref, from_ref, obj_size);

    LockWord old_lock_word = to_ref->GetLockWord(false);

    if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
      // Lost the race. Another thread (either GC or mutator) stored
      // the forwarding pointer first. Make the lost copy (to_ref)
      // look like a valid but dead (dummy) object and keep it for
      // future reuse.
      FillWithDummyObject(to_ref, bytes_allocated);
      if (!fall_back_to_non_moving) {
        DCHECK(region_space_->IsInToSpace(to_ref));
        if (bytes_allocated > space::RegionSpace::kRegionSize) {
          // Free the large alloc.
          region_space_->FreeLarge(to_ref, bytes_allocated);
        } else {
          // Record the lost copy for later reuse.
          heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated);
          to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated);
          to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1);
          MutexLock mu(Thread::Current(), skipped_blocks_lock_);
          skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
                                                    reinterpret_cast<uint8_t*>(to_ref)));
        }
      } else {
        DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
        DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
        // Free the non-moving-space chunk.
        accounting::ContinuousSpaceBitmap* mark_bitmap =
            heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
        CHECK(mark_bitmap != nullptr);
        CHECK(mark_bitmap->Clear(to_ref));
        heap_->non_moving_space_->Free(Thread::Current(), to_ref);
      }

      // Get the winner's forward ptr.
      mirror::Object* lost_fwd_ptr = to_ref;
      to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress());
      CHECK(to_ref != nullptr);
      CHECK_NE(to_ref, lost_fwd_ptr);
      CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref));
      CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
      return to_ref;
    }

    // Set the gray ptr.
    if (kUseBakerReadBarrier) {
      to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr());
    }

    LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));

    // Try to atomically write the fwd ptr.
    bool success = from_ref->CasLockWordWeakSequentiallyConsistent(old_lock_word, new_lock_word);
    if (LIKELY(success)) {
      // The CAS succeeded.
      objects_moved_.FetchAndAddSequentiallyConsistent(1);
      bytes_moved_.FetchAndAddSequentiallyConsistent(region_space_alloc_size);
      if (LIKELY(!fall_back_to_non_moving)) {
        DCHECK(region_space_->IsInToSpace(to_ref));
      } else {
        DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
        DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
      }
      if (kUseBakerReadBarrier) {
        DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      }
      DCHECK(GetFwdPtr(from_ref) == to_ref);
      CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
      PushOntoMarkStack(to_ref);
      return to_ref;
    } else {
      // The CAS failed. It may have lost the race or may have failed
      // due to monitor/hashcode ops. Either way, retry.
    }
  }
}

mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
  DCHECK(from_ref != nullptr);
  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
  if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
    // It's already marked.
    return from_ref;
  }
  mirror::Object* to_ref;
  if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
    to_ref = GetFwdPtr(from_ref);
    DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) ||
           heap_->non_moving_space_->HasAddress(to_ref))
        << "from_ref=" << from_ref << " to_ref=" << to_ref;
  } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
    if (region_space_bitmap_->Test(from_ref)) {
      to_ref = from_ref;
    } else {
      to_ref = nullptr;
    }
  } else {
    // from_ref is in a non-moving space.
    if (immune_spaces_.ContainsObject(from_ref)) {
      // An immune object is alive.
      to_ref = from_ref;
    } else {
      // Non-immune non-moving space. Use the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
      accounting::LargeObjectBitmap* los_bitmap =
          heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
      bool is_los = mark_bitmap == nullptr;
      if (!is_los && mark_bitmap->Test(from_ref)) {
        // Already marked.
        to_ref = from_ref;
      } else if (is_los && los_bitmap->Test(from_ref)) {
        // Already marked in LOS.
        to_ref = from_ref;
      } else {
        // Not marked.
        if (IsOnAllocStack(from_ref)) {
          // If on the allocation stack, it's considered marked.
          to_ref = from_ref;
        } else {
          // Not marked.
          to_ref = nullptr;
        }
      }
    }
  }
  return to_ref;
}

bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
  QuasiAtomic::ThreadFenceAcquire();
  accounting::ObjectStack* alloc_stack = GetAllocationStack();
  return alloc_stack->Contains(ref);
}

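// Mark an object that is not in the region space. Liveness is tracked with the space's mark
// bitmap (or the LOS bitmap), not a forwarding pointer. For the Baker barrier, the white->gray
// CAS is paired with an AtomicTestAndSet on the bitmap so that exactly one thread pushes the
// object onto the mark stack; a thread that grayed the object but lost the bitmap race records
// it on the false-gray stack so it can be whitened later.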
mirror::Object* ConcurrentCopying::MarkNonMoving(mirror::Object* ref) {
  // ref is in a non-moving space (from_ref == to_ref).
  DCHECK(!region_space_->HasAddress(ref)) << ref;
  DCHECK(!immune_spaces_.ContainsObject(ref));
  // Use the mark bitmap.
  accounting::ContinuousSpaceBitmap* mark_bitmap =
      heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
  accounting::LargeObjectBitmap* los_bitmap =
      heap_mark_bitmap_->GetLargeObjectBitmap(ref);
  CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
  bool is_los = mark_bitmap == nullptr;
  if (!is_los && mark_bitmap->Test(ref)) {
    // Already marked.
    if (kUseBakerReadBarrier) {
      DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
             ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
    }
  } else if (is_los && los_bitmap->Test(ref)) {
    // Already marked in LOS.
    if (kUseBakerReadBarrier) {
      DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
             ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
    }
  } else {
    // Not marked.
    if (IsOnAllocStack(ref)) {
      // If it's on the allocation stack, it's considered marked. Keep it white.
      // Objects on the allocation stack need not be marked.
      if (!is_los) {
        DCHECK(!mark_bitmap->Test(ref));
      } else {
        DCHECK(!los_bitmap->Test(ref));
      }
      if (kUseBakerReadBarrier) {
        DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr());
      }
    } else {
      // For the baker-style RB, we need to handle 'false-gray' cases. See the
      // kRegionTypeUnevacFromSpace-case comment in Mark().
      if (kUseBakerReadBarrier) {
        // Test the bitmap first to reduce the chance of false gray cases.
        if ((!is_los && mark_bitmap->Test(ref)) ||
            (is_los && los_bitmap->Test(ref))) {
          return ref;
        }
      }
      // Not marked or on the allocation stack. Try to mark it.
      // This may or may not succeed, which is ok.
      bool cas_success = false;
      if (kUseBakerReadBarrier) {
        cas_success = ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(),
                                                       ReadBarrier::GrayPtr());
      }
      if (!is_los && mark_bitmap->AtomicTestAndSet(ref)) {
        // Already marked.
        if (kUseBakerReadBarrier && cas_success &&
            ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
          PushOntoFalseGrayStack(ref);
        }
      } else if (is_los && los_bitmap->AtomicTestAndSet(ref)) {
        // Already marked in LOS.
        if (kUseBakerReadBarrier && cas_success &&
            ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
          PushOntoFalseGrayStack(ref);
        }
      } else {
        // Newly marked.
        if (kUseBakerReadBarrier) {
          DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
        }
        PushOntoMarkStack(ref);
      }
    }
  }
  return ref;
}

void ConcurrentCopying::FinishPhase() {
  Thread* const self = Thread::Current();
  {
    MutexLock mu(self, mark_stack_lock_);
    CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
  }
  region_space_ = nullptr;
  {
    MutexLock mu(Thread::Current(), skipped_blocks_lock_);
    skipped_blocks_map_.clear();
  }
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
    heap_->ClearMarkedObjects();
  }
  if (measure_read_barrier_slow_path_) {
    MutexLock mu(self, rb_slow_path_histogram_lock_);
    rb_slow_path_time_histogram_.AdjustAndAddValue(rb_slow_path_ns_.LoadRelaxed());
    rb_slow_path_count_total_ += rb_slow_path_count_.LoadRelaxed();
    rb_slow_path_count_gc_total_ += rb_slow_path_count_gc_.LoadRelaxed();
  }
}

bool ConcurrentCopying::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) {
  mirror::Object* from_ref = field->AsMirrorPtr();
  mirror::Object* to_ref = IsMarked(from_ref);
  if (to_ref == nullptr) {
    return false;
  }
  if (from_ref != to_ref) {
    QuasiAtomic::ThreadFenceRelease();
    field->Assign(to_ref);
    QuasiAtomic::ThreadFenceSequentiallyConsistent();
  }
  return true;
}

mirror::Object* ConcurrentCopying::MarkObject(mirror::Object* from_ref) {
  return Mark(from_ref);
}

void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}

void ConcurrentCopying::ProcessReferences(Thread* self) {
  TimingLogger::ScopedTiming split("ProcessReferences", GetTimings());
  // We don't really need to hold the heap bitmap lock as we use CAS to mark in bitmaps.
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}

void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  region_space_->RevokeAllThreadLocalBuffers();
}

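// Slow-path entry point used when read barrier measurement is enabled: counts slow-path hits
// (split by GC thread vs. mutators) and accumulates the time spent in Mark() for the histogram
// reported by DumpPerformanceInfo().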
mirror::Object* ConcurrentCopying::MarkFromReadBarrierWithMeasurements(mirror::Object* from_ref) {
  if (Thread::Current() != thread_running_gc_) {
    rb_slow_path_count_.FetchAndAddRelaxed(1u);
  } else {
    rb_slow_path_count_gc_.FetchAndAddRelaxed(1u);
  }
  ScopedTrace tr(__FUNCTION__);
  const uint64_t start_time = measure_read_barrier_slow_path_ ? NanoTime() : 0u;
  mirror::Object* ret = Mark(from_ref);
  if (measure_read_barrier_slow_path_) {
    rb_slow_path_ns_.FetchAndAddRelaxed(NanoTime() - start_time);
  }
  return ret;
}

void ConcurrentCopying::DumpPerformanceInfo(std::ostream& os) {
  GarbageCollector::DumpPerformanceInfo(os);
  MutexLock mu(Thread::Current(), rb_slow_path_histogram_lock_);
  if (rb_slow_path_time_histogram_.SampleSize() > 0) {
    Histogram<uint64_t>::CumulativeData cumulative_data;
    rb_slow_path_time_histogram_.CreateHistogram(&cumulative_data);
    rb_slow_path_time_histogram_.PrintConfidenceIntervals(os, 0.99, cumulative_data);
  }
  if (rb_slow_path_count_total_ > 0) {
    os << "Slow path count " << rb_slow_path_count_total_ << "\n";
  }
  if (rb_slow_path_count_gc_total_ > 0) {
    os << "GC slow path count " << rb_slow_path_count_gc_total_ << "\n";
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art