/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "concurrent_copying.h"

#include "art_field-inl.h"
#include "base/stl_util.h"
#include "debugger.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
#include "gc/space/space.h"
#include "intern_table.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"

namespace art {
namespace gc {
namespace collector {

static constexpr size_t kDefaultGcMarkStackSize = 2 * MB;

ConcurrentCopying::ConcurrentCopying(Heap* heap, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       "concurrent copying + mark sweep"),
      region_space_(nullptr), gc_barrier_(new Barrier(0)),
      gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack",
                                                     kDefaultGcMarkStackSize,
                                                     kDefaultGcMarkStackSize)),
      mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
      thread_running_gc_(nullptr),
      is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
      heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0), mark_stack_mode_(kMarkStackModeOff),
      weak_ref_access_enabled_(true),
      skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
      rb_table_(heap_->GetReadBarrierTable()),
      force_evacuate_all_(false) {
  static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
                "The region space size and the read barrier table region size must match");
  cc_heap_bitmap_.reset(new accounting::HeapBitmap(heap));
  Thread* self = Thread::Current();
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Cache this so that we won't have to lock heap_bitmap_lock_ in
    // Mark(), which could otherwise cause a nested lock on
    // heap_bitmap_lock_ when a read barrier is triggered during GC, or a
    // lock order violation (class_linker_lock_ and heap_bitmap_lock_).
    heap_mark_bitmap_ = heap->GetMarkBitmap();
  }
  {
    MutexLock mu(self, mark_stack_lock_);
    for (size_t i = 0; i < kMarkStackPoolSize; ++i) {
      accounting::AtomicStack<mirror::Object>* mark_stack =
          accounting::AtomicStack<mirror::Object>::Create(
              "thread local mark stack", kMarkStackSize, kMarkStackSize);
      pooled_mark_stacks_.push_back(mark_stack);
    }
  }
}

void ConcurrentCopying::MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) {
  // Used for preserving soft references; it should be OK not to have a CAS here since there
  // should be no other threads that can trigger read barriers on the same referent during
  // reference processing.
  from_ref->Assign(Mark(from_ref->AsMirrorPtr()));
  DCHECK(!from_ref->IsNull());
}

ConcurrentCopying::~ConcurrentCopying() {
  STLDeleteElements(&pooled_mark_stacks_);
}

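// RunPhases() drives a single GC cycle: InitializePhase() and MarkingPhase() run concurrently
// with mutators while holding the mutator lock in shared mode, FlipThreadRoots() and the
// optional no-from-space-refs verification below run during pauses, and ReclaimPhase() then
// reclaims the from-space, again concurrently with mutators.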
void ConcurrentCopying::RunPhases() {
  CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier);
  CHECK(!is_active_);
  is_active_ = true;
  Thread* self = Thread::Current();
  thread_running_gc_ = self;
  Locks::mutator_lock_->AssertNotHeld(self);
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    InitializePhase();
  }
  FlipThreadRoots();
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    MarkingPhase();
  }
  // Verify no from space refs. This causes a pause.
  if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) {
    TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
    ScopedPause pause(this);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "Verifying no from-space refs";
    }
    VerifyNoFromSpaceReferences();
    if (kVerboseMode) {
      LOG(INFO) << "Done verifying no from-space refs";
    }
    CheckEmptyMarkStack();
  }
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  FinishPhase();
  CHECK(is_active_);
  is_active_ = false;
  thread_running_gc_ = nullptr;
}

void ConcurrentCopying::BindBitmaps() {
  Thread* self = Thread::Current();
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
        || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      CHECK(space->IsZygoteSpace() || space->IsImageSpace());
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
      const char* bitmap_name = space->IsImageSpace() ? "cc image space bitmap" :
          "cc zygote space bitmap";
      // TODO: try avoiding using bitmaps for image/zygote to save space.
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create(bitmap_name, space->Begin(), space->Capacity());
      cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
      cc_bitmaps_.push_back(bitmap);
    } else if (space == region_space_) {
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create("cc region space bitmap",
                                                    space->Begin(), space->Capacity());
      cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
      cc_bitmaps_.push_back(bitmap);
      region_space_bitmap_ = bitmap;
    }
  }
}

void ConcurrentCopying::InitializePhase() {
  TimingLogger::ScopedTiming split("InitializePhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC InitializePhase";
    LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-"
              << reinterpret_cast<void*>(region_space_->Limit());
  }
  CheckEmptyMarkStack();
  immune_region_.Reset();
  bytes_moved_.StoreRelaxed(0);
  objects_moved_.StoreRelaxed(0);
  if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
      GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
      GetCurrentIteration()->GetClearSoftReferences()) {
    force_evacuate_all_ = true;
  } else {
    force_evacuate_all_ = false;
  }
  BindBitmaps();
  if (kVerboseMode) {
    LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_;
    LOG(INFO) << "Immune region: " << immune_region_.Begin() << "-" << immune_region_.End();
    LOG(INFO) << "GC end of InitializePhase";
  }
}

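// The flip pause: Runtime::FlipThreadRoots() runs ThreadFlipVisitor on each thread to revoke its
// TLAB and visit/forward its roots, and runs FlipCallback exactly once (on the GC-running thread,
// with the mutator lock held exclusively) to set up the from-space and swap the allocation
// stacks. After this pause, the to-space invariant is maintained by the mutators' read barriers.
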
// Used to switch the thread roots of a thread from from-space refs to to-space refs.
class ThreadFlipVisitor : public Closure {
 public:
  ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
      : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
  }

  virtual void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->SetIsGcMarking(true);
    if (use_tlab_ && thread->HasTlab()) {
      if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
        // This must come before the revoke.
        size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
        reinterpret_cast<Atomic<size_t>*>(&concurrent_copying_->from_space_num_objects_at_first_pause_)->
            FetchAndAddSequentiallyConsistent(thread_local_objects);
      } else {
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
      }
    }
    if (kUseThreadLocalAllocationStack) {
      thread->RevokeThreadLocalAllocationStack();
    }
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    thread->VisitRoots(concurrent_copying_);
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool use_tlab_;
};

// Called back from Runtime::FlipThreadRoots() during a pause.
class FlipCallback : public Closure {
 public:
  explicit FlipCallback(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE REQUIRES(Locks::mutator_lock_) {
    ConcurrentCopying* cc = concurrent_copying_;
    TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self);
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
    cc->SwapStacks();
    if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
      cc->RecordLiveStackFreezeSize(self);
      cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated();
      cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
    }
    cc->is_marking_ = true;
    cc->mark_stack_mode_.StoreRelaxed(ConcurrentCopying::kMarkStackModeThreadLocal);
    if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
      CHECK(Runtime::Current()->IsAotCompiler());
      TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
      Runtime::Current()->VisitTransactionRoots(cc);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Switch thread roots from from-space refs to to-space refs. Forward/mark the thread roots.
void ConcurrentCopying::FlipThreadRoots() {
  TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
  }
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  gc_barrier_->Init(self, 0);
  ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
  FlipCallback flip_callback(this);
  heap_->ThreadFlipBegin(self);  // Sync with JNI critical calls.
  size_t barrier_count = Runtime::Current()->FlipThreadRoots(
      &thread_flip_visitor, &flip_callback, this);
  heap_->ThreadFlipEnd(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  is_asserting_to_space_invariant_ = true;
  QuasiAtomic::ThreadFenceForConstructor();
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
    LOG(INFO) << "GC end of FlipThreadRoots";
  }
}

void ConcurrentCopying::SwapStacks() {
  heap_->SwapStacks();
}

void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
}

// Used to visit objects in the immune spaces.
class ConcurrentCopyingImmuneSpaceObjVisitor {
 public:
  explicit ConcurrentCopyingImmuneSpaceObjVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}

  void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_)
      SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
    DCHECK(obj != nullptr);
    DCHECK(collector_->immune_region_.ContainsObject(obj));
    accounting::ContinuousSpaceBitmap* cc_bitmap =
        collector_->cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
    DCHECK(cc_bitmap != nullptr)
        << "An immune space object must have a bitmap";
    if (kIsDebugBuild) {
      DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj))
          << "Immune space object must be already marked";
    }
    // This may or may not succeed, which is ok.
    if (kUseBakerReadBarrier) {
      obj->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
    }
    if (cc_bitmap->AtomicTestAndSet(obj)) {
      // Already marked. Do nothing.
    } else {
      // Newly marked. Set the gray bit and push it onto the mark stack.
      CHECK(!kUseBakerReadBarrier || obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      collector_->PushOntoMarkStack(obj);
    }
  }

 private:
  ConcurrentCopying* const collector_;
};

class EmptyCheckpoint : public Closure {
 public:
  explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Concurrently mark roots that are guarded by read barriers and process the mark stack.
void ConcurrentCopying::MarkingPhase() {
  TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC MarkingPhase";
  }
  CHECK(weak_ref_access_enabled_);
  {
    // Mark the image root. The WB-based collectors do not need to
    // scan the image objects from roots here because they rely on the
    // card table, but it's necessary for the RB to-space invariant to hold.
    TimingLogger::ScopedTiming split1("VisitImageRoots", GetTimings());
    gc::space::ImageSpace* image = heap_->GetImageSpace();
    if (image != nullptr) {
      mirror::ObjectArray<mirror::Object>* image_root = image->GetImageHeader().GetImageRoots();
      mirror::Object* marked_image_root = Mark(image_root);
      CHECK_EQ(image_root, marked_image_root) << "An image object does not move";
      if (ReadBarrier::kEnableToSpaceInvariantChecks) {
        AssertToSpaceInvariant(nullptr, MemberOffset(0), marked_image_root);
      }
    }
  }
  {
    TimingLogger::ScopedTiming split2("VisitConcurrentRoots", GetTimings());
    Runtime::Current()->VisitConcurrentRoots(this, kVisitRootFlagAllRoots);
  }
  {
    // TODO: don't visit the transaction roots if it's not active.
    TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
    Runtime::Current()->VisitNonThreadRoots(this);
  }

  // Immune spaces.
  for (auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
      accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
      ConcurrentCopyingImmuneSpaceObjVisitor visitor(this);
      live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                    reinterpret_cast<uintptr_t>(space->Limit()),
                                    visitor);
    }
  }

  Thread* self = Thread::Current();
  {
    TimingLogger::ScopedTiming split7("ProcessMarkStack", GetTimings());
    // We transition through three mark stack modes (thread-local, shared, GC-exclusive). The
    // primary reasons are that we need a checkpoint to process thread-local mark stacks, that
    // after we disable weak ref accesses we can no longer use a checkpoint (running threads may
    // be blocked at WaitHoldingLocks, which would cause a deadlock), and that once we reach the
    // point where we process weak references, we can access the GC mark stack without a lock,
    // which makes mark stack processing more efficient.

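    // Roughly, the mark stack mode sequence within this function is:
    //   kMarkStackModeThreadLocal    (set in FlipCallback)
    //     -> kMarkStackModeShared       via SwitchToSharedMarkStackMode(), which also disables
    //                                   weak ref accesses
    //     -> kMarkStackModeGcExclusive  via SwitchToGcExclusiveMarkStackMode()
    //     -> kMarkStackModeOff          via DisableMarking() once marking has finished.
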
    // Process the mark stack once in the thread-local stack mode. This marks most of the live
    // objects, aside from weak ref accesses with read barriers (Reference::GetReferent() and
    // system weaks) that may happen concurrently while we process the mark stack and that may
    // newly mark/gray objects and push refs onto the mark stack.
    ProcessMarkStack();
    // Switch to the shared mark stack mode. That is, revoke and process thread-local mark stacks
    // for the last time before transitioning to the shared mark stack mode, which would process
    // new refs that may have been concurrently pushed onto the mark stack during the
    // ProcessMarkStack() call above. At the same time, disable weak ref accesses using a
    // per-thread flag. It's important to do these together in a single checkpoint so that we can
    // ensure that mutators won't newly gray objects and push new refs onto the mark stack due to
    // weak ref accesses, and that mutators safely transition to the shared mark stack mode
    // (without leaving unprocessed refs on the thread-local mark stacks), without a race. This is
    // why we use a thread-local weak ref access flag, Thread::tls32_.weak_ref_access_enabled_,
    // instead of the global ones.
    SwitchToSharedMarkStackMode();
    CHECK(!self->GetWeakRefAccessEnabled());
    // Now that weak ref accesses are disabled, once we exhaust the shared mark stack again here
    // (which may be non-empty if there were refs found on thread-local mark stacks during the
    // above SwitchToSharedMarkStackMode() call), we won't have new refs to process, that is,
    // mutators (via read barriers) have no way to produce any more refs to process. Marking
    // converges once before we process weak refs below.
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Switch to the GC exclusive mark stack mode so that we can process the mark stack without a
    // lock from this point on.
    SwitchToGcExclusiveMarkStackMode();
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "ProcessReferences";
    }
    // Process weak references. This may produce new refs to process and have them processed via
    // ProcessMarkStack (in the GC exclusive mark stack mode).
    ProcessReferences(self);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks";
    }
    SweepSystemWeaks(self);
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks done";
    }
    // Process the mark stack here one last time because the above SweepSystemWeaks() call may have
    // marked some objects (strings) alive, as hash_set::Erase() can call the hash function for
    // arbitrary elements in the weak intern table in InternTable::Table::SweepWeaks().
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Re-enable weak ref accesses.
    ReenableWeakRefAccess(self);
    // Free data for class loaders that we unloaded.
    Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
    // Marking is done. Disable marking.
    DisableMarking();
    CheckEmptyMarkStack();
  }

  CHECK(weak_ref_access_enabled_);
  if (kVerboseMode) {
    LOG(INFO) << "GC end of MarkingPhase";
  }
}

void ConcurrentCopying::ReenableWeakRefAccess(Thread* self) {
  if (kVerboseMode) {
    LOG(INFO) << "ReenableWeakRefAccess";
  }
  weak_ref_access_enabled_.StoreRelaxed(true);  // This is for new threads.
  QuasiAtomic::ThreadFenceForConstructor();
  // Iterate all threads (don't need to or can't use a checkpoint) and re-enable weak ref access.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      thread->SetWeakRefAccessEnabled(true);
    }
  }
  // Unblock blocking threads.
  GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
  Runtime::Current()->BroadcastForNewSystemWeaks();
}

class DisableMarkingCheckpoint : public Closure {
 public:
  explicit DisableMarkingCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // Disable the thread-local is_gc_marking flag.
    // Note that a thread that has just started right before this checkpoint may already have this
    // flag set to false, which is ok.
    thread->SetIsGcMarking(false);
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

void ConcurrentCopying::IssueDisableMarkingCheckpoint() {
  Thread* self = Thread::Current();
  DisableMarkingCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions have
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

void ConcurrentCopying::DisableMarking() {
  // Change the global is_marking flag to false. Do a fence before doing a checkpoint to update the
  // thread-local flags so that a new thread starting up will get the correct is_marking flag.
  is_marking_ = false;
  QuasiAtomic::ThreadFenceForConstructor();
  // Use a checkpoint to turn off the thread-local is_gc_marking flags and to ensure no threads are
  // still in the middle of a read barrier which may have a from-space ref cached in a local
  // variable.
  IssueDisableMarkingCheckpoint();
  if (kUseTableLookupReadBarrier) {
    heap_->rb_table_->ClearAll();
    DCHECK(heap_->rb_table_->IsAllCleared());
  }
  is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(1);
  mark_stack_mode_.StoreSequentiallyConsistent(kMarkStackModeOff);
}

void ConcurrentCopying::IssueEmptyCheckpoint() {
  Thread* self = Thread::Current();
  EmptyCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions have
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

void ConcurrentCopying::ExpandGcMarkStack() {
  DCHECK(gc_mark_stack_->IsFull());
  const size_t new_size = gc_mark_stack_->Capacity() * 2;
  std::vector<StackReference<mirror::Object>> temp(gc_mark_stack_->Begin(),
                                                   gc_mark_stack_->End());
  gc_mark_stack_->Resize(new_size);
  for (auto& ref : temp) {
    gc_mark_stack_->PushBack(ref.AsMirrorPtr());
  }
  DCHECK(!gc_mark_stack_->IsFull());
}

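// PushOntoMarkStack() routes a newly marked (gray) object to the mark stack that matches the
// current mark stack mode: the GC-running thread always pushes onto gc_mark_stack_; in the
// thread-local mode mutators push onto their own thread-local mark stacks (handing full ones over
// via revoked_mark_stacks_), in the shared mode they push onto gc_mark_stack_ under
// mark_stack_lock_, and in the GC-exclusive mode only the GC-running thread may push at all.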
void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
  CHECK_EQ(is_mark_stack_push_disallowed_.LoadRelaxed(), 0)
      << " " << to_ref << " " << PrettyTypeOf(to_ref);
  Thread* self = Thread::Current();  // TODO: pass self as an argument from call sites?
  CHECK(thread_running_gc_ != nullptr);
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (LIKELY(mark_stack_mode == kMarkStackModeThreadLocal)) {
    if (LIKELY(self == thread_running_gc_)) {
      // If GC-running thread, use the GC mark stack instead of a thread-local mark stack.
      CHECK(self->GetThreadLocalMarkStack() == nullptr);
      if (UNLIKELY(gc_mark_stack_->IsFull())) {
        ExpandGcMarkStack();
      }
      gc_mark_stack_->PushBack(to_ref);
    } else {
      // Otherwise, use a thread-local mark stack.
      accounting::AtomicStack<mirror::Object>* tl_mark_stack = self->GetThreadLocalMarkStack();
      if (UNLIKELY(tl_mark_stack == nullptr || tl_mark_stack->IsFull())) {
        MutexLock mu(self, mark_stack_lock_);
        // Get a new thread local mark stack.
        accounting::AtomicStack<mirror::Object>* new_tl_mark_stack;
        if (!pooled_mark_stacks_.empty()) {
          // Use a pooled mark stack.
          new_tl_mark_stack = pooled_mark_stacks_.back();
          pooled_mark_stacks_.pop_back();
        } else {
          // None pooled. Create a new one.
          new_tl_mark_stack =
              accounting::AtomicStack<mirror::Object>::Create(
                  "thread local mark stack", 4 * KB, 4 * KB);
        }
        DCHECK(new_tl_mark_stack != nullptr);
        DCHECK(new_tl_mark_stack->IsEmpty());
        new_tl_mark_stack->PushBack(to_ref);
        self->SetThreadLocalMarkStack(new_tl_mark_stack);
        if (tl_mark_stack != nullptr) {
          // Store the old full stack into a vector.
          revoked_mark_stacks_.push_back(tl_mark_stack);
        }
      } else {
        tl_mark_stack->PushBack(to_ref);
      }
    }
  } else if (mark_stack_mode == kMarkStackModeShared) {
    // Access the shared GC mark stack with a lock.
    MutexLock mu(self, mark_stack_lock_);
    if (UNLIKELY(gc_mark_stack_->IsFull())) {
      ExpandGcMarkStack();
    }
    gc_mark_stack_->PushBack(to_ref);
  } else {
    CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
             static_cast<uint32_t>(kMarkStackModeGcExclusive))
        << "ref=" << to_ref
        << " self->gc_marking=" << self->GetIsGcMarking()
        << " cc->is_marking=" << is_marking_;
    CHECK(self == thread_running_gc_)
        << "Only GC-running thread should access the mark stack "
        << "in the GC exclusive mark stack mode";
    // Access the GC mark stack without a lock.
    if (UNLIKELY(gc_mark_stack_->IsFull())) {
      ExpandGcMarkStack();
    }
    gc_mark_stack_->PushBack(to_ref);
  }
}

accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() {
  return heap_->allocation_stack_.get();
}

accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
  return heap_->live_stack_.get();
}

// The following visitors are used to verify that there are no
// references to the from-space left after marking.
class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    if (kUseBakerReadBarrier) {
      if (collector_->RegionSpace()->IsInToSpace(ref)) {
        CHECK(ref->GetReadBarrierPointer() == nullptr)
            << "To-space ref " << ref << " " << PrettyTypeOf(ref)
            << " has non-white rb_ptr " << ref->GetReadBarrierPointer();
      } else {
        CHECK(ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
              (ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
               collector_->IsOnAllocStack(ref)))
            << "Non-moving/unevac from space ref " << ref << " " << PrettyTypeOf(ref)
            << " has non-black rb_ptr " << ref->GetReadBarrierPointer()
            << " but isn't on the alloc stack (and has white rb_ptr)."
            << " Is it in the non-moving space="
            << (collector_->GetHeap()->GetNonMovingSpace()->HasAddress(ref));
      }
    }
  }

  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    DCHECK(root != nullptr);
    operator()(root);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    this->operator()(ref, mirror::Reference::ReferentOffset(), false);
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(root->AsMirrorPtr());
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void *arg)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor visitor(collector);
    obj->VisitReferences(visitor, visitor);
    if (kUseBakerReadBarrier) {
      if (collector->RegionSpace()->IsInToSpace(obj)) {
        CHECK(obj->GetReadBarrierPointer() == nullptr)
            << "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer();
      } else {
        CHECK(obj->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
              (obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
               collector->IsOnAllocStack(obj)))
            << "Non-moving space/unevac from space ref " << obj << " " << PrettyTypeOf(obj)
            << " has non-black rb_ptr " << obj->GetReadBarrierPointer()
            << " but isn't on the alloc stack (and has white rb_ptr). Is it in the non-moving space="
            << (collector->GetHeap()->GetNonMovingSpace()->HasAddress(obj));
      }
    }
  }

 private:
  ConcurrentCopying* const collector_;
};

// Verify that there are no from-space references left after the marking phase.
void ConcurrentCopying::VerifyNoFromSpaceReferences() {
  Thread* self = Thread::Current();
  DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
  // Verify that all threads have their is_gc_marking flag set to false.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      CHECK(!thread->GetIsGcMarking());
    }
  }
  ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor visitor(this);
  // Roots.
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
    Runtime::Current()->VisitRoots(&ref_visitor);
  }
  // The to-space.
  region_space_->WalkToSpace(ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor::ObjectCallback,
                             this);
  // Non-moving spaces.
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->GetMarkBitmap()->Visit(visitor);
  }
  // The alloc stack.
  {
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
    for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
        it < end; ++it) {
      mirror::Object* const obj = it->AsMirrorPtr();
      if (obj != nullptr && obj->GetClass() != nullptr) {
        // TODO: need to call this only if obj is alive?
        ref_visitor(obj);
        visitor(obj);
      }
    }
  }
  // TODO: LOS. But only refs in LOS are classes.
}

// The following visitors are used to assert the to-space invariant.
class ConcurrentCopyingAssertToSpaceInvariantRefsVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* ref ATTRIBUTE_UNUSED) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
    visitor(root->AsMirrorPtr());
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void *arg)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
    ConcurrentCopyingAssertToSpaceInvariantFieldVisitor visitor(collector);
    obj->VisitReferences(visitor, visitor);
  }

 private:
  ConcurrentCopying* const collector_;
};

class RevokeThreadLocalMarkStackCheckpoint : public Closure {
 public:
  RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
                                       bool disable_weak_ref_access)
      : concurrent_copying_(concurrent_copying),
        disable_weak_ref_access_(disable_weak_ref_access) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // Revoke thread local mark stacks.
    accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
    if (tl_mark_stack != nullptr) {
      MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
      concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
      thread->SetThreadLocalMarkStack(nullptr);
    }
    // Disable weak ref access.
    if (disable_weak_ref_access_) {
      thread->SetWeakRefAccessEnabled(false);
    }
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool disable_weak_ref_access_;
};

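// Revoking a thread-local mark stack moves it (under mark_stack_lock_) into revoked_mark_stacks_
// and clears the thread's pointer to it, so that the GC-running thread can drain it in
// ProcessThreadLocalMarkStacks(). The same checkpoint can also clear the per-thread
// weak-ref-access flag when the caller requests it.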
void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access) {
  Thread* self = Thread::Current();
  RevokeThreadLocalMarkStackCheckpoint check_point(this, disable_weak_ref_access);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions have
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

void ConcurrentCopying::RevokeThreadLocalMarkStack(Thread* thread) {
  Thread* self = Thread::Current();
  CHECK_EQ(self, thread);
  accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
  if (tl_mark_stack != nullptr) {
    CHECK(is_marking_);
    MutexLock mu(self, mark_stack_lock_);
    revoked_mark_stacks_.push_back(tl_mark_stack);
    thread->SetThreadLocalMarkStack(nullptr);
  }
}

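// ProcessMarkStack() keeps draining until it observes an empty mark stack twice in a row. A single
// empty observation is not sufficient because scanning an object can push more references, and in
// the thread-local mode mutators may still be handing over mark stacks between iterations.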
void ConcurrentCopying::ProcessMarkStack() {
  if (kVerboseMode) {
    LOG(INFO) << "ProcessMarkStack. ";
  }
  bool empty_prev = false;
  while (true) {
    bool empty = ProcessMarkStackOnce();
    if (empty_prev && empty) {
      // Saw empty mark stack for a second time, done.
      break;
    }
    empty_prev = empty;
  }
}

bool ConcurrentCopying::ProcessMarkStackOnce() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK(self == thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  size_t count = 0;
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (mark_stack_mode == kMarkStackModeThreadLocal) {
    // Process the thread-local mark stacks and the GC mark stack.
    count += ProcessThreadLocalMarkStacks(false);
    while (!gc_mark_stack_->IsEmpty()) {
      mirror::Object* to_ref = gc_mark_stack_->PopBack();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    gc_mark_stack_->Reset();
  } else if (mark_stack_mode == kMarkStackModeShared) {
    // Process the shared GC mark stack with a lock.
    {
      MutexLock mu(self, mark_stack_lock_);
      CHECK(revoked_mark_stacks_.empty());
    }
    while (true) {
      std::vector<mirror::Object*> refs;
      {
        // Copy refs with lock. Note the number of refs should be small.
        MutexLock mu(self, mark_stack_lock_);
        if (gc_mark_stack_->IsEmpty()) {
          break;
        }
        for (StackReference<mirror::Object>* p = gc_mark_stack_->Begin();
             p != gc_mark_stack_->End(); ++p) {
          refs.push_back(p->AsMirrorPtr());
        }
        gc_mark_stack_->Reset();
      }
      for (mirror::Object* ref : refs) {
        ProcessMarkStackRef(ref);
        ++count;
      }
    }
  } else {
    CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
             static_cast<uint32_t>(kMarkStackModeGcExclusive));
    {
      MutexLock mu(self, mark_stack_lock_);
      CHECK(revoked_mark_stacks_.empty());
    }
    // Process the GC mark stack in the exclusive mode. No need to take the lock.
    while (!gc_mark_stack_->IsEmpty()) {
      mirror::Object* to_ref = gc_mark_stack_->PopBack();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    gc_mark_stack_->Reset();
  }

  // Return true if the stack was empty.
  return count == 0;
}

size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access) {
  // Run a checkpoint to collect all thread local mark stacks and iterate over them all.
  RevokeThreadLocalMarkStacks(disable_weak_ref_access);
  size_t count = 0;
  std::vector<accounting::AtomicStack<mirror::Object>*> mark_stacks;
  {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    // Make a copy of the mark stack vector.
    mark_stacks = revoked_mark_stacks_;
    revoked_mark_stacks_.clear();
  }
  for (accounting::AtomicStack<mirror::Object>* mark_stack : mark_stacks) {
    for (StackReference<mirror::Object>* p = mark_stack->Begin(); p != mark_stack->End(); ++p) {
      mirror::Object* to_ref = p->AsMirrorPtr();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    {
      MutexLock mu(Thread::Current(), mark_stack_lock_);
      if (pooled_mark_stacks_.size() >= kMarkStackPoolSize) {
        // The pool has enough. Delete it.
        delete mark_stack;
      } else {
        // Otherwise, put it into the pool for later reuse.
        mark_stack->Reset();
        pooled_mark_stacks_.push_back(mark_stack);
      }
    }
  }
  return count;
}

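// Read barrier pointer ("color") states as used below with the Baker read barrier: white means
// not yet marked (or a to-space object that has already been scanned), gray means marked and
// waiting on a mark stack to be scanned, and black means a non-moving/unevac-from-space object
// that has been marked through. Only gray objects still need Scan().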
inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
  DCHECK(!region_space_->IsInFromSpace(to_ref));
  if (kUseBakerReadBarrier) {
    DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
        << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
        << " is_marked=" << IsMarked(to_ref);
  }
  // Scan ref fields.
  Scan(to_ref);
  // Mark the gray ref as white or black.
  if (kUseBakerReadBarrier) {
    DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
        << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
        << " is_marked=" << IsMarked(to_ref);
  }
#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
  if (UNLIKELY((to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
                to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr &&
                !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())))) {
    // Leave this Reference gray in the queue so that GetReferent() will trigger a read barrier. We
    // will change it to black or white later in ReferenceQueue::DequeuePendingReference().
    CHECK(to_ref->AsReference()->IsEnqueued()) << "Left unenqueued ref gray " << to_ref;
  } else {
    // We may occasionally leave a Reference black or white in the queue if its referent happens to
    // be concurrently marked after the Scan() call above has enqueued the Reference, in which case
    // the above IsInToSpace() evaluates to true and we change the color from gray to black or white
    // here in this else block.
    if (kUseBakerReadBarrier) {
      if (region_space_->IsInToSpace(to_ref)) {
        // If to-space, change from gray to white.
        bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                           ReadBarrier::WhitePtr());
        CHECK(success) << "Must succeed as we won the race.";
        DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
      } else {
        // If non-moving space/unevac from space, change from gray
        // to black. We can't change gray to white because it's not
        // safe to use CAS if two threads change values in opposite
        // directions (A->B and B->A). So, we change it to black to
        // indicate non-moving objects that have been marked
        // through. Note we'd need to change from black to white
        // later (concurrently).
        bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                           ReadBarrier::BlackPtr());
        CHECK(success) << "Must succeed as we won the race.";
        DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
      }
    }
  }
#else
  DCHECK(!kUseBakerReadBarrier);
#endif
  if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
    ConcurrentCopyingAssertToSpaceInvariantObjectVisitor visitor(this);
    visitor(to_ref);
  }
}

void ConcurrentCopying::SwitchToSharedMarkStackMode() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK_EQ(self, thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
           static_cast<uint32_t>(kMarkStackModeThreadLocal));
  mark_stack_mode_.StoreRelaxed(kMarkStackModeShared);
  CHECK(weak_ref_access_enabled_.LoadRelaxed());
  weak_ref_access_enabled_.StoreRelaxed(false);
  QuasiAtomic::ThreadFenceForConstructor();
  // Process the thread-local mark stacks one last time after switching to the shared mark stack
  // mode and disabling weak ref accesses.
1128 ProcessThreadLocalMarkStacks(true);
1129 if (kVerboseMode) {
1130 LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access";
1131 }
1132}
1133
1134void ConcurrentCopying::SwitchToGcExclusiveMarkStackMode() {
1135 Thread* self = Thread::Current();
1136 CHECK(thread_running_gc_ != nullptr);
1137 CHECK_EQ(self, thread_running_gc_);
1138 CHECK(self->GetThreadLocalMarkStack() == nullptr);
1139 MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
1140 CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
1141 static_cast<uint32_t>(kMarkStackModeShared));
1142 mark_stack_mode_.StoreRelaxed(kMarkStackModeGcExclusive);
1143 QuasiAtomic::ThreadFenceForConstructor();
1144 if (kVerboseMode) {
1145 LOG(INFO) << "Switched to GC exclusive mark stack mode";
1146 }
1147}
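// A minimal sketch of the mark stack mode hand-over performed by the two functions
// above: verify the current mode, publish the new one with a relaxed store, then
// fence before the next phase runs. Standard C++ only; the enum values and the
// global below are assumptions for illustration, not the collector's real state.
#if 0  // Illustrative sketch, not compiled.
#include <atomic>
#include <cassert>
#include <cstdint>

enum MarkStackMode : uint32_t { kModeOff, kModeThreadLocal, kModeShared, kModeGcExclusive };

std::atomic<uint32_t> g_mark_stack_mode{kModeThreadLocal};

void SwitchMode(uint32_t expected_before, uint32_t after) {
  assert(g_mark_stack_mode.load(std::memory_order_relaxed) == expected_before);
  g_mark_stack_mode.store(after, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);  // like ThreadFenceForConstructor()
}

int main() {
  SwitchMode(kModeThreadLocal, kModeShared);   // then drain thread-local stacks once more
  SwitchMode(kModeShared, kModeGcExclusive);   // only the GC thread pushes from here on
  return 0;
}
#endif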
1148
1149void ConcurrentCopying::CheckEmptyMarkStack() {
1150 Thread* self = Thread::Current();
1151 CHECK(thread_running_gc_ != nullptr);
1152 CHECK_EQ(self, thread_running_gc_);
1153 CHECK(self->GetThreadLocalMarkStack() == nullptr);
1154 MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
1155 if (mark_stack_mode == kMarkStackModeThreadLocal) {
1156 // Thread-local mark stack mode.
1157 RevokeThreadLocalMarkStacks(false);
1158 MutexLock mu(Thread::Current(), mark_stack_lock_);
1159 if (!revoked_mark_stacks_.empty()) {
1160 for (accounting::AtomicStack<mirror::Object>* mark_stack : revoked_mark_stacks_) {
1161 while (!mark_stack->IsEmpty()) {
1162 mirror::Object* obj = mark_stack->PopBack();
1163 if (kUseBakerReadBarrier) {
1164 mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
1165 LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj) << " rb_ptr=" << rb_ptr
1166 << " is_marked=" << IsMarked(obj);
1167 } else {
1168 LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj)
1169 << " is_marked=" << IsMarked(obj);
1170 }
1171 }
1172 }
1173 LOG(FATAL) << "mark stack is not empty";
1174 }
1175 } else {
1176 // Shared, GC-exclusive, or off.
1177 MutexLock mu(Thread::Current(), mark_stack_lock_);
1178 CHECK(gc_mark_stack_->IsEmpty());
1179 CHECK(revoked_mark_stacks_.empty());
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001180 }
1181}
1182
1183void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
1184 TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
1185 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
Mathieu Chartier97509952015-07-13 14:35:43 -07001186 Runtime::Current()->SweepSystemWeaks(this);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001187}
1188
1189void ConcurrentCopying::Sweep(bool swap_bitmaps) {
1190 {
1191 TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
1192 accounting::ObjectStack* live_stack = heap_->GetLiveStack();
1193 if (kEnableFromSpaceAccountingCheck) {
1194 CHECK_GE(live_stack_freeze_size_, live_stack->Size());
1195 }
1196 heap_->MarkAllocStackAsLive(live_stack);
1197 live_stack->Reset();
1198 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001199 CheckEmptyMarkStack();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001200 TimingLogger::ScopedTiming split("Sweep", GetTimings());
1201 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
1202 if (space->IsContinuousMemMapAllocSpace()) {
1203 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
1204 if (space == region_space_ || immune_region_.ContainsSpace(space)) {
1205 continue;
1206 }
1207 TimingLogger::ScopedTiming split2(
1208 alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
1209 RecordFree(alloc_space->Sweep(swap_bitmaps));
1210 }
1211 }
1212 SweepLargeObjects(swap_bitmaps);
1213}
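// A rough sketch of what a sweep over a non-moving space amounts to: anything the
// marking phase did not reach is freed and its bytes are recorded, mirroring
// RecordFree(alloc_space->Sweep(swap_bitmaps)). Standard C++ only; the Allocation
// record stands in for the space's live/mark bitmaps and is an assumption.
#if 0  // Illustrative sketch, not compiled.
#include <cstddef>
#include <utility>
#include <vector>

struct Allocation {
  size_t bytes;
  bool marked;
  bool freed = false;
};

// Returns {freed object count, freed byte count}.
std::pair<size_t, size_t> SweepSpace(std::vector<Allocation>& space) {
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  for (Allocation& a : space) {
    if (!a.marked && !a.freed) {
      a.freed = true;
      ++freed_objects;
      freed_bytes += a.bytes;
    }
  }
  return {freed_objects, freed_bytes};
}
#endif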
1214
1215void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) {
1216 TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
1217 RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
1218}
1219
1220class ConcurrentCopyingClearBlackPtrsVisitor {
1221 public:
1222 explicit ConcurrentCopyingClearBlackPtrsVisitor(ConcurrentCopying* cc)
1223 : collector_(cc) {}
Andreas Gampe65b798e2015-04-06 09:35:22 -07001224#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
1225 NO_RETURN
1226#endif
Mathieu Chartier90443472015-07-16 20:32:27 -07001227 void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_)
1228 SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001229 DCHECK(obj != nullptr);
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08001230 DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj)) << obj;
1231 DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << obj;
Hiroshi Yamauchi60f63f52015-04-23 16:12:40 -07001232 obj->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr());
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08001233 DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001234 }
1235
1236 private:
1237 ConcurrentCopying* const collector_;
1238};
1239
1240// Clear the black ptrs in non-moving objects back to white.
1241void ConcurrentCopying::ClearBlackPtrs() {
1242 CHECK(kUseBakerReadBarrier);
1243 TimingLogger::ScopedTiming split("ClearBlackPtrs", GetTimings());
1244 ConcurrentCopyingClearBlackPtrsVisitor visitor(this);
1245 for (auto& space : heap_->GetContinuousSpaces()) {
1246 if (space == region_space_) {
1247 continue;
1248 }
1249 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
1250 if (kVerboseMode) {
1251 LOG(INFO) << "ClearBlackPtrs: " << *space << " bitmap: " << *mark_bitmap;
1252 }
1253 mark_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
1254 reinterpret_cast<uintptr_t>(space->Limit()),
1255 visitor);
1256 }
1257 space::LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace();
1258 large_object_space->GetMarkBitmap()->VisitMarkedRange(
1259 reinterpret_cast<uintptr_t>(large_object_space->Begin()),
1260 reinterpret_cast<uintptr_t>(large_object_space->End()),
1261 visitor);
1262 // Also check the objects on the allocation stack.
1263 if (ReadBarrier::kEnableReadBarrierInvariantChecks || kIsDebugBuild) {
1264 size_t count = GetAllocationStack()->Size();
Mathieu Chartiercb535da2015-01-23 13:50:03 -08001265 auto* it = GetAllocationStack()->Begin();
1266 auto* end = GetAllocationStack()->End();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001267 for (size_t i = 0; i < count; ++i, ++it) {
Mathieu Chartiercb535da2015-01-23 13:50:03 -08001268 CHECK_LT(it, end);
1269 mirror::Object* obj = it->AsMirrorPtr();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001270 if (obj != nullptr) {
1271 // Must have been cleared above.
Mathieu Chartiercb535da2015-01-23 13:50:03 -08001272 CHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001273 }
1274 }
1275 }
1276}
1277
1278void ConcurrentCopying::ReclaimPhase() {
1279 TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings());
1280 if (kVerboseMode) {
1281 LOG(INFO) << "GC ReclaimPhase";
1282 }
1283 Thread* self = Thread::Current();
1284
1285 {
1286 // Double-check that the mark stack is empty.
1287 // Note: need to set this after VerifyNoFromSpaceRef().
1288 is_asserting_to_space_invariant_ = false;
1289 QuasiAtomic::ThreadFenceForConstructor();
1290 if (kVerboseMode) {
1291 LOG(INFO) << "Issue an empty check point. ";
1292 }
1293 IssueEmptyCheckpoint();
1294 // Disable the check.
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001295 is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(0);
1296 CheckEmptyMarkStack();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001297 }
1298
1299 {
1300 // Record freed objects.
1301 TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
1302 // Don't include thread-locals that are in the to-space.
1303 uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
1304 uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
1305 uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
1306 uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
1307 uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
1308 uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
1309 if (kEnableFromSpaceAccountingCheck) {
1310 CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
1311 CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
1312 }
1313 CHECK_LE(to_objects, from_objects);
1314 CHECK_LE(to_bytes, from_bytes);
1315 int64_t freed_bytes = from_bytes - to_bytes;
1316 int64_t freed_objects = from_objects - to_objects;
1317 if (kVerboseMode) {
1318 LOG(INFO) << "RecordFree:"
1319 << " from_bytes=" << from_bytes << " from_objects=" << from_objects
1320 << " unevac_from_bytes=" << unevac_from_bytes << " unevac_from_objects=" << unevac_from_objects
1321 << " to_bytes=" << to_bytes << " to_objects=" << to_objects
1322 << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects
1323 << " from_space size=" << region_space_->FromSpaceSize()
1324 << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
1325 << " to_space size=" << region_space_->ToSpaceSize();
1326 LOG(INFO) << "(before) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
1327 }
1328 RecordFree(ObjectBytePair(freed_objects, freed_bytes));
1329 if (kVerboseMode) {
1330 LOG(INFO) << "(after) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
1331 }
1332 }
1333
1334 {
1335 TimingLogger::ScopedTiming split3("ComputeUnevacFromSpaceLiveRatio", GetTimings());
1336 ComputeUnevacFromSpaceLiveRatio();
1337 }
1338
1339 {
1340 TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
1341 region_space_->ClearFromSpace();
1342 }
1343
1344 {
1345 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1346 if (kUseBakerReadBarrier) {
1347 ClearBlackPtrs();
1348 }
1349 Sweep(false);
1350 SwapBitmaps();
1351 heap_->UnBindBitmaps();
1352
1353 // Remove bitmaps for the immune spaces.
1354 while (!cc_bitmaps_.empty()) {
1355 accounting::ContinuousSpaceBitmap* cc_bitmap = cc_bitmaps_.back();
1356 cc_heap_bitmap_->RemoveContinuousSpaceBitmap(cc_bitmap);
1357 delete cc_bitmap;
1358 cc_bitmaps_.pop_back();
1359 }
1360 region_space_bitmap_ = nullptr;
1361 }
1362
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001363 CheckEmptyMarkStack();
1364
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001365 if (kVerboseMode) {
1366 LOG(INFO) << "GC end of ReclaimPhase";
1367 }
1368}
1369
1370class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor {
1371 public:
1372 explicit ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor(ConcurrentCopying* cc)
1373 : collector_(cc) {}
Mathieu Chartier90443472015-07-16 20:32:27 -07001374 void operator()(mirror::Object* ref) const SHARED_REQUIRES(Locks::mutator_lock_)
1375 SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001376 DCHECK(ref != nullptr);
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08001377 DCHECK(collector_->region_space_bitmap_->Test(ref)) << ref;
1378 DCHECK(collector_->region_space_->IsInUnevacFromSpace(ref)) << ref;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001379 if (kUseBakerReadBarrier) {
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08001380 DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << ref;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001381 // Clear the black ptr.
Hiroshi Yamauchi60f63f52015-04-23 16:12:40 -07001382 ref->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr());
1383 DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << ref;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001384 }
1385 size_t obj_size = ref->SizeOf();
1386 size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
1387 collector_->region_space_->AddLiveBytes(ref, alloc_size);
1388 }
1389
1390 private:
Mathieu Chartier97509952015-07-13 14:35:43 -07001391 ConcurrentCopying* const collector_;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001392};
1393
1394 // Compute how many live bytes are left in the unevac from-space regions.
1395void ConcurrentCopying::ComputeUnevacFromSpaceLiveRatio() {
1396 region_space_->AssertAllRegionLiveBytesZeroOrCleared();
1397 ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor visitor(this);
1398 region_space_bitmap_->VisitMarkedRange(reinterpret_cast<uintptr_t>(region_space_->Begin()),
1399 reinterpret_cast<uintptr_t>(region_space_->Limit()),
1400 visitor);
1401}
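// A small sketch of the live-byte accounting done by the visitor above: each marked
// object contributes its size rounded up to the region space alignment, and the
// per-region totals later feed the live ratio that decides whether a region is worth
// evacuating. Standard C++ only; the alignment and region size constants are assumptions.
#if 0  // Illustrative sketch, not compiled.
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr size_t kAlignment = 8;          // assumed object alignment
constexpr size_t kRegionSize = 1u << 20;  // assumed region size (1 MB)

constexpr size_t RoundUp(size_t x, size_t n) { return (x + n - 1) / n * n; }

struct Region { size_t live_bytes = 0; };

void AddLiveObject(std::vector<Region>& regions, uintptr_t space_begin,
                   uintptr_t obj_addr, size_t obj_size) {
  size_t region_idx = (obj_addr - space_begin) / kRegionSize;
  regions[region_idx].live_bytes += RoundUp(obj_size, kAlignment);
}

double LiveRatio(const Region& r) {
  return static_cast<double>(r.live_bytes) / kRegionSize;
}
#endif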
1402
1403// Assert the to-space invariant.
1404void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
1405 mirror::Object* ref) {
1406 CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
1407 if (is_asserting_to_space_invariant_) {
1408 if (region_space_->IsInToSpace(ref)) {
1409 // OK.
1410 return;
1411 } else if (region_space_->IsInUnevacFromSpace(ref)) {
1412 CHECK(region_space_bitmap_->Test(ref)) << ref;
1413 } else if (region_space_->IsInFromSpace(ref)) {
1414 // Not OK. Do extra logging.
1415 if (obj != nullptr) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001416 LogFromSpaceRefHolder(obj, offset);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001417 }
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001418 ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001419 CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
1420 } else {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001421 AssertToSpaceInvariantInNonMovingSpace(obj, ref);
1422 }
1423 }
1424}
1425
1426class RootPrinter {
1427 public:
1428 RootPrinter() { }
1429
1430 template <class MirrorType>
1431 ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
Mathieu Chartier90443472015-07-16 20:32:27 -07001432 SHARED_REQUIRES(Locks::mutator_lock_) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001433 if (!root->IsNull()) {
1434 VisitRoot(root);
1435 }
1436 }
1437
1438 template <class MirrorType>
1439 void VisitRoot(mirror::Object** root)
Mathieu Chartier90443472015-07-16 20:32:27 -07001440 SHARED_REQUIRES(Locks::mutator_lock_) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001441 LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << *root;
1442 }
1443
1444 template <class MirrorType>
1445 void VisitRoot(mirror::CompressedReference<MirrorType>* root)
Mathieu Chartier90443472015-07-16 20:32:27 -07001446 SHARED_REQUIRES(Locks::mutator_lock_) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001447 LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << root->AsMirrorPtr();
1448 }
1449};
1450
1451void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source,
1452 mirror::Object* ref) {
1453 CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
1454 if (is_asserting_to_space_invariant_) {
1455 if (region_space_->IsInToSpace(ref)) {
1456 // OK.
1457 return;
1458 } else if (region_space_->IsInUnevacFromSpace(ref)) {
1459 CHECK(region_space_bitmap_->Test(ref)) << ref;
1460 } else if (region_space_->IsInFromSpace(ref)) {
1461 // Not OK. Do extra logging.
1462 if (gc_root_source == nullptr) {
1463 // No info.
1464 } else if (gc_root_source->HasArtField()) {
1465 ArtField* field = gc_root_source->GetArtField();
1466 LOG(INTERNAL_FATAL) << "gc root in field " << field << " " << PrettyField(field);
1467 RootPrinter root_printer;
1468 field->VisitRoots(root_printer);
1469 } else if (gc_root_source->HasArtMethod()) {
1470 ArtMethod* method = gc_root_source->GetArtMethod();
1471 LOG(INTERNAL_FATAL) << "gc root in method " << method << " " << PrettyMethod(method);
1472 RootPrinter root_printer;
Mathieu Chartier1147b9b2015-09-14 18:50:08 -07001473 method->VisitRoots(root_printer, sizeof(void*));
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001474 }
1475 ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
1476 region_space_->DumpNonFreeRegions(LOG(INTERNAL_FATAL));
1477 PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
1478 MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
1479 CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
1480 } else {
1481 AssertToSpaceInvariantInNonMovingSpace(nullptr, ref);
1482 }
1483 }
1484}
1485
1486void ConcurrentCopying::LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) {
1487 if (kUseBakerReadBarrier) {
1488 LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj)
1489 << " holder rb_ptr=" << obj->GetReadBarrierPointer();
1490 } else {
1491 LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj);
1492 }
1493 if (region_space_->IsInFromSpace(obj)) {
1494 LOG(INFO) << "holder is in the from-space.";
1495 } else if (region_space_->IsInToSpace(obj)) {
1496 LOG(INFO) << "holder is in the to-space.";
1497 } else if (region_space_->IsInUnevacFromSpace(obj)) {
1498 LOG(INFO) << "holder is in the unevac from-space.";
1499 if (region_space_bitmap_->Test(obj)) {
1500 LOG(INFO) << "holder is marked in the region space bitmap.";
1501 } else {
1502 LOG(INFO) << "holder is not marked in the region space bitmap.";
1503 }
1504 } else {
1505 // In a non-moving space.
1506 if (immune_region_.ContainsObject(obj)) {
1507 LOG(INFO) << "holder is in the image or the zygote space.";
1508 accounting::ContinuousSpaceBitmap* cc_bitmap =
1509 cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
1510 CHECK(cc_bitmap != nullptr)
1511 << "An immune space object must have a bitmap.";
1512 if (cc_bitmap->Test(obj)) {
1513 LOG(INFO) << "holder is marked in the bit map.";
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001514 } else {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001515 LOG(INFO) << "holder is NOT marked in the bit map.";
1516 }
1517 } else {
1518 LOG(INFO) << "holder is in a non-moving (or main) space.";
1519 accounting::ContinuousSpaceBitmap* mark_bitmap =
1520 heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
1521 accounting::LargeObjectBitmap* los_bitmap =
1522 heap_mark_bitmap_->GetLargeObjectBitmap(obj);
1523 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1524 bool is_los = mark_bitmap == nullptr;
1525 if (!is_los && mark_bitmap->Test(obj)) {
1526 LOG(INFO) << "holder is marked in the mark bit map.";
1527 } else if (is_los && los_bitmap->Test(obj)) {
1528 LOG(INFO) << "holder is marked in the los bit map.";
1529 } else {
1530 // If ref is on the allocation stack, then it is considered
1531 // marked/alive (but not necessarily on the live stack).
1532 if (IsOnAllocStack(obj)) {
1533 LOG(INFO) << "holder is on the alloc stack.";
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001534 } else {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001535 LOG(INFO) << "holder is not marked or on the alloc stack.";
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001536 }
1537 }
1538 }
1539 }
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001540 LOG(INFO) << "offset=" << offset.SizeValue();
1541}
1542
1543void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj,
1544 mirror::Object* ref) {
1545 // The ref is in a non-moving space. Check that it is marked.
1546 if (immune_region_.ContainsObject(ref)) {
1547 accounting::ContinuousSpaceBitmap* cc_bitmap =
1548 cc_heap_bitmap_->GetContinuousSpaceBitmap(ref);
1549 CHECK(cc_bitmap != nullptr)
1550 << "An immune space ref must have a bitmap. " << ref;
1551 if (kUseBakerReadBarrier) {
1552 CHECK(cc_bitmap->Test(ref))
1553 << "Unmarked immune space ref. obj=" << obj << " rb_ptr="
1554 << obj->GetReadBarrierPointer() << " ref=" << ref;
1555 } else {
1556 CHECK(cc_bitmap->Test(ref))
1557 << "Unmarked immune space ref. obj=" << obj << " ref=" << ref;
1558 }
1559 } else {
1560 accounting::ContinuousSpaceBitmap* mark_bitmap =
1561 heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
1562 accounting::LargeObjectBitmap* los_bitmap =
1563 heap_mark_bitmap_->GetLargeObjectBitmap(ref);
1564 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1565 bool is_los = mark_bitmap == nullptr;
1566 if ((!is_los && mark_bitmap->Test(ref)) ||
1567 (is_los && los_bitmap->Test(ref))) {
1568 // OK.
1569 } else {
1570 // If ref is on the allocation stack, then it may not be
1571 // marked live, but it is considered marked/alive (though not
1572 // necessarily on the live stack).
1573 CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. "
1574 << "obj=" << obj << " ref=" << ref;
1575 }
1576 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001577}
1578
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001579// Used to scan ref fields of an object.
1580class ConcurrentCopyingRefFieldsVisitor {
1581 public:
1582 explicit ConcurrentCopyingRefFieldsVisitor(ConcurrentCopying* collector)
1583 : collector_(collector) {}
1584
1585 void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
Mathieu Chartier90443472015-07-16 20:32:27 -07001586 const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_)
1587 SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001588 collector_->Process(obj, offset);
1589 }
1590
1591 void operator()(mirror::Class* klass, mirror::Reference* ref) const
Mathieu Chartier90443472015-07-16 20:32:27 -07001592 SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001593 CHECK(klass->IsTypeOfReferenceClass());
1594 collector_->DelayReferenceReferent(klass, ref);
1595 }
1596
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001597 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001598 ALWAYS_INLINE
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001599 SHARED_REQUIRES(Locks::mutator_lock_) {
1600 if (!root->IsNull()) {
1601 VisitRoot(root);
1602 }
1603 }
1604
1605 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001606 ALWAYS_INLINE
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001607 SHARED_REQUIRES(Locks::mutator_lock_) {
1608 collector_->MarkRoot(root);
1609 }
1610
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001611 private:
1612 ConcurrentCopying* const collector_;
1613};
1614
1615// Scan ref fields of an object.
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001616inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001617 DCHECK(!region_space_->IsInFromSpace(to_ref));
1618 ConcurrentCopyingRefFieldsVisitor visitor(this);
Mathieu Chartier059ef3d2015-08-18 13:54:21 -07001619 to_ref->VisitReferences(visitor, visitor);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001620}
1621
1622// Process a field.
1623inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001624 mirror::Object* ref = obj->GetFieldObject<
1625 mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001626 mirror::Object* to_ref = Mark(ref);
1627 if (to_ref == ref) {
1628 return;
1629 }
1630 // This may fail if the mutator writes to the field at the same time, but that is ok.
1631 mirror::Object* expected_ref = ref;
1632 mirror::Object* new_ref = to_ref;
1633 do {
1634 if (expected_ref !=
1635 obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) {
1636 // It was updated by the mutator.
1637 break;
1638 }
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001639 } while (!obj->CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<
1640 false, false, kVerifyNone>(offset, expected_ref, new_ref));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001641}
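// A minimal sketch of the field-update loop above: try to replace the old reference
// with the forwarded one using a weak CAS, and give up as soon as the field no longer
// holds the value we read, because a concurrent mutator write must win. Standard C++
// only; Object and UpdateField are illustrative stand-ins, not ART types.
#if 0  // Illustrative sketch, not compiled.
#include <atomic>

struct Object;  // opaque payload for the sketch

void UpdateField(std::atomic<Object*>* field, Object* expected_ref, Object* new_ref) {
  Object* expected = expected_ref;
  while (!field->compare_exchange_weak(expected, new_ref, std::memory_order_seq_cst)) {
    if (expected != expected_ref) {
      // The mutator stored a different reference; leave it alone.
      return;
    }
    expected = expected_ref;  // spurious failure: retry with the same expectation
  }
}
#endif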
1642
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001643// Process some roots.
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001644inline void ConcurrentCopying::VisitRoots(
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001645 mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
1646 for (size_t i = 0; i < count; ++i) {
1647 mirror::Object** root = roots[i];
1648 mirror::Object* ref = *root;
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001649 mirror::Object* to_ref = Mark(ref);
1650 if (to_ref == ref) {
Mathieu Chartier4809d0a2015-04-07 10:39:04 -07001651 continue;
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001652 }
1653 Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
1654 mirror::Object* expected_ref = ref;
1655 mirror::Object* new_ref = to_ref;
1656 do {
1657 if (expected_ref != addr->LoadRelaxed()) {
1658 // It was updated by the mutator.
1659 break;
1660 }
1661 } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
1662 }
1663}
1664
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001665inline void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) {
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001666 DCHECK(!root->IsNull());
1667 mirror::Object* const ref = root->AsMirrorPtr();
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001668 mirror::Object* to_ref = Mark(ref);
1669 if (to_ref != ref) {
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001670 auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
1671 auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
1672 auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001673 // If the cas fails, then it was updated by the mutator.
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001674 do {
1675 if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
1676 // It was updated by the mutator.
1677 break;
1678 }
1679 } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
1680 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001681}
1682
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001683inline void ConcurrentCopying::VisitRoots(
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001684 mirror::CompressedReference<mirror::Object>** roots, size_t count,
1685 const RootInfo& info ATTRIBUTE_UNUSED) {
1686 for (size_t i = 0; i < count; ++i) {
1687 mirror::CompressedReference<mirror::Object>* const root = roots[i];
1688 if (!root->IsNull()) {
1689 MarkRoot(root);
1690 }
1691 }
1692}
1693
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001694// Fill the given memory block with a dummy object. Used to fill in a
1695 // copy of an object that was lost in a race.
1696void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
Roland Levillain14d90572015-07-16 10:52:26 +01001697 CHECK_ALIGNED(byte_size, kObjectAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001698 memset(dummy_obj, 0, byte_size);
1699 mirror::Class* int_array_class = mirror::IntArray::GetArrayClass();
1700 CHECK(int_array_class != nullptr);
1701 AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
1702 size_t component_size = int_array_class->GetComponentSize();
1703 CHECK_EQ(component_size, sizeof(int32_t));
1704 size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
1705 if (data_offset > byte_size) {
1706 // An int array is too big. Use java.lang.Object.
1707 mirror::Class* java_lang_Object = WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object);
1708 AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object);
1709 CHECK_EQ(byte_size, java_lang_Object->GetObjectSize());
1710 dummy_obj->SetClass(java_lang_Object);
1711 CHECK_EQ(byte_size, dummy_obj->SizeOf());
1712 } else {
1713 // Use an int array.
1714 dummy_obj->SetClass(int_array_class);
1715 CHECK(dummy_obj->IsArrayInstance());
1716 int32_t length = (byte_size - data_offset) / component_size;
1717 dummy_obj->AsArray()->SetLength(length);
1718 CHECK_EQ(dummy_obj->AsArray()->GetLength(), length)
1719 << "byte_size=" << byte_size << " length=" << length
1720 << " component_size=" << component_size << " data_offset=" << data_offset;
1721 CHECK_EQ(byte_size, dummy_obj->SizeOf())
1722 << "byte_size=" << byte_size << " length=" << length
1723 << " component_size=" << component_size << " data_offset=" << data_offset;
1724 }
1725}
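// A small arithmetic sketch of how a filler block is described above: if the block is
// big enough for an array header, pick the int[] length that makes the array consume
// the block exactly; otherwise the block must be exactly one plain object. Standard C++
// only; the header-size constants are assumptions, not the real ART object layout.
#if 0  // Illustrative sketch, not compiled.
#include <cassert>
#include <cstddef>

constexpr size_t kIntArrayDataOffset = 12;  // assumed header + length field
constexpr size_t kIntComponentSize = 4;
constexpr size_t kPlainObjectSize = 8;      // assumed java.lang.Object size

size_t FillerArrayLength(size_t byte_size) {
  if (byte_size < kIntArrayDataOffset) {
    assert(byte_size == kPlainObjectSize);  // must match a plain object exactly
    return 0;                               // no array needed
  }
  size_t length = (byte_size - kIntArrayDataOffset) / kIntComponentSize;
  // Holds because byte_size is a multiple of the (assumed) object alignment.
  assert(kIntArrayDataOffset + length * kIntComponentSize == byte_size);
  return length;
}
#endif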
1726
1727 // Reuse the memory blocks that were copies of objects lost in races.
1728mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
1729 // Try to reuse the blocks that were unused due to CAS failures.
Roland Levillain14d90572015-07-16 10:52:26 +01001730 CHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001731 Thread* self = Thread::Current();
1732 size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
1733 MutexLock mu(self, skipped_blocks_lock_);
1734 auto it = skipped_blocks_map_.lower_bound(alloc_size);
1735 if (it == skipped_blocks_map_.end()) {
1736 // Not found.
1737 return nullptr;
1738 }
1739 {
1740 size_t byte_size = it->first;
1741 CHECK_GE(byte_size, alloc_size);
1742 if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
1743 // If remainder would be too small for a dummy object, retry with a larger request size.
1744 it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
1745 if (it == skipped_blocks_map_.end()) {
1746 // Not found.
1747 return nullptr;
1748 }
Roland Levillain14d90572015-07-16 10:52:26 +01001749 CHECK_ALIGNED(it->first - alloc_size, space::RegionSpace::kAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001750 CHECK_GE(it->first - alloc_size, min_object_size)
1751 << "byte_size=" << byte_size << " it->first=" << it->first << " alloc_size=" << alloc_size;
1752 }
1753 }
1754 // Found a block.
1755 CHECK(it != skipped_blocks_map_.end());
1756 size_t byte_size = it->first;
1757 uint8_t* addr = it->second;
1758 CHECK_GE(byte_size, alloc_size);
1759 CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
Roland Levillain14d90572015-07-16 10:52:26 +01001760 CHECK_ALIGNED(byte_size, space::RegionSpace::kAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001761 if (kVerboseMode) {
1762 LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
1763 }
1764 skipped_blocks_map_.erase(it);
1765 memset(addr, 0, byte_size);
1766 if (byte_size > alloc_size) {
1767 // Return the remainder to the map.
Roland Levillain14d90572015-07-16 10:52:26 +01001768 CHECK_ALIGNED(byte_size - alloc_size, space::RegionSpace::kAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001769 CHECK_GE(byte_size - alloc_size, min_object_size);
1770 FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
1771 byte_size - alloc_size);
1772 CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
1773 skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
1774 }
1775 return reinterpret_cast<mirror::Object*>(addr);
1776}
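// A compact sketch of the best-fit reuse above: skipped blocks are kept in a multimap
// keyed by size, lower_bound() finds the smallest block that fits, a too-small remainder
// forces a retry with a larger request, and any usable tail goes back into the map.
// Standard C++ only; kMinObjectSize is an assumed minimum filler size.
#if 0  // Illustrative sketch, not compiled.
#include <cstddef>
#include <cstdint>
#include <map>

constexpr size_t kMinObjectSize = 8;

using SkippedBlocks = std::multimap<size_t, uint8_t*>;  // byte size -> block start

uint8_t* ReuseSkippedBlock(SkippedBlocks& blocks, size_t alloc_size) {
  auto it = blocks.lower_bound(alloc_size);
  if (it == blocks.end()) {
    return nullptr;  // nothing big enough
  }
  if (it->first > alloc_size && it->first - alloc_size < kMinObjectSize) {
    // The leftover could not be described by a filler object; look for a block
    // that leaves either no remainder or a usable one.
    it = blocks.lower_bound(alloc_size + kMinObjectSize);
    if (it == blocks.end()) {
      return nullptr;
    }
  }
  size_t byte_size = it->first;
  uint8_t* addr = it->second;
  blocks.erase(it);
  if (byte_size > alloc_size) {
    blocks.emplace(byte_size - alloc_size, addr + alloc_size);  // return the tail
  }
  return addr;
}
#endif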
1777
1778mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) {
1779 DCHECK(region_space_->IsInFromSpace(from_ref));
1780 // No read barrier to avoid nested RB that might violate the to-space
1781 // invariant. Note that from_ref is a from space ref so the SizeOf()
1782 // call will access the from-space meta objects, but it's ok and necessary.
1783 size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
1784 size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
1785 size_t region_space_bytes_allocated = 0U;
1786 size_t non_moving_space_bytes_allocated = 0U;
1787 size_t bytes_allocated = 0U;
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001788 size_t dummy;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001789 mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001790 region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001791 bytes_allocated = region_space_bytes_allocated;
1792 if (to_ref != nullptr) {
1793 DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
1794 }
1795 bool fall_back_to_non_moving = false;
1796 if (UNLIKELY(to_ref == nullptr)) {
1797 // Failed to allocate in the region space. Try the skipped blocks.
1798 to_ref = AllocateInSkippedBlock(region_space_alloc_size);
1799 if (to_ref != nullptr) {
1800 // Succeeded to allocate in a skipped block.
1801 if (heap_->use_tlab_) {
1802 // This is necessary for the tlab case as it's not accounted in the space.
1803 region_space_->RecordAlloc(to_ref);
1804 }
1805 bytes_allocated = region_space_alloc_size;
1806 } else {
1807 // Fall back to the non-moving space.
1808 fall_back_to_non_moving = true;
1809 if (kVerboseMode) {
1810 LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
1811 << to_space_bytes_skipped_.LoadSequentiallyConsistent()
1812 << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent();
1813 }
1815 to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001816 &non_moving_space_bytes_allocated, nullptr, &dummy);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001817 CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed";
1818 bytes_allocated = non_moving_space_bytes_allocated;
1819 // Mark it in the mark bitmap.
1820 accounting::ContinuousSpaceBitmap* mark_bitmap =
1821 heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
1822 CHECK(mark_bitmap != nullptr);
1823 CHECK(!mark_bitmap->AtomicTestAndSet(to_ref));
1824 }
1825 }
1826 DCHECK(to_ref != nullptr);
1827
1828 // Attempt to install the forward pointer. This is in a loop as the
1829 // lock word atomic write can fail.
1830 while (true) {
1831 // Copy the object. TODO: copy only the lock word from the second iteration on?
1832 memcpy(to_ref, from_ref, obj_size);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001833
1834 LockWord old_lock_word = to_ref->GetLockWord(false);
1835
1836 if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
1837 // Lost the race. Another thread (either GC or mutator) stored
1838 // the forwarding pointer first. Make the lost copy (to_ref)
1839 // look like a valid but dead (dummy) object and keep it for
1840 // future reuse.
1841 FillWithDummyObject(to_ref, bytes_allocated);
1842 if (!fall_back_to_non_moving) {
1843 DCHECK(region_space_->IsInToSpace(to_ref));
1844 if (bytes_allocated > space::RegionSpace::kRegionSize) {
1845 // Free the large alloc.
1846 region_space_->FreeLarge(to_ref, bytes_allocated);
1847 } else {
1848 // Record the lost copy for later reuse.
1849 heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated);
1850 to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated);
1851 to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1);
1852 MutexLock mu(Thread::Current(), skipped_blocks_lock_);
1853 skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
1854 reinterpret_cast<uint8_t*>(to_ref)));
1855 }
1856 } else {
1857 DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
1858 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
1859 // Free the non-moving-space chunk.
1860 accounting::ContinuousSpaceBitmap* mark_bitmap =
1861 heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
1862 CHECK(mark_bitmap != nullptr);
1863 CHECK(mark_bitmap->Clear(to_ref));
1864 heap_->non_moving_space_->Free(Thread::Current(), to_ref);
1865 }
1866
1867 // Get the winner's forward ptr.
1868 mirror::Object* lost_fwd_ptr = to_ref;
1869 to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress());
1870 CHECK(to_ref != nullptr);
1871 CHECK_NE(to_ref, lost_fwd_ptr);
1872 CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref));
1873 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
1874 return to_ref;
1875 }
1876
Hiroshi Yamauchi60f63f52015-04-23 16:12:40 -07001877 // Set the gray ptr.
1878 if (kUseBakerReadBarrier) {
1879 to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr());
1880 }
1881
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001882 LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));
1883
1884 // Try to atomically write the fwd ptr.
1885 bool success = from_ref->CasLockWordWeakSequentiallyConsistent(old_lock_word, new_lock_word);
1886 if (LIKELY(success)) {
1887 // The CAS succeeded.
1888 objects_moved_.FetchAndAddSequentiallyConsistent(1);
1889 bytes_moved_.FetchAndAddSequentiallyConsistent(region_space_alloc_size);
1890 if (LIKELY(!fall_back_to_non_moving)) {
1891 DCHECK(region_space_->IsInToSpace(to_ref));
1892 } else {
1893 DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
1894 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
1895 }
1896 if (kUseBakerReadBarrier) {
1897 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
1898 }
1899 DCHECK(GetFwdPtr(from_ref) == to_ref);
1900 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001901 PushOntoMarkStack(to_ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001902 return to_ref;
1903 } else {
1904 // The CAS failed. It may have lost the race or may have failed
1905 // due to monitor/hashcode ops. Either way, retry.
1906 }
1907 }
1908}
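// A minimal sketch of the forwarding-pointer race Copy() resolves: every competing
// thread makes its own copy, but only one CAS on the (initially empty) forwarding slot
// succeeds; losers recycle their copy and adopt the winner's. Standard C++ only; the
// FromObject layout and the recycle callback are illustrative assumptions, not ART's
// lock word protocol.
#if 0  // Illustrative sketch, not compiled.
#include <atomic>
#include <cstring>

struct FromObject {
  std::atomic<void*> forwarding{nullptr};  // stands in for the lock word
  char payload[56];
};

void* Evacuate(FromObject* from, void* copy, void (*recycle)(void*)) {
  std::memcpy(copy, from->payload, sizeof(from->payload));  // copy the contents first
  void* expected = nullptr;
  if (from->forwarding.compare_exchange_strong(expected, copy, std::memory_order_seq_cst)) {
    return copy;    // we won: our copy becomes the to-space object
  }
  recycle(copy);    // we lost: keep the block around for reuse (cf. the skipped blocks map)
  return expected;  // on failure the CAS loaded the winner's forwarding pointer
}
#endif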
1909
1910mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
1911 DCHECK(from_ref != nullptr);
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08001912 space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
1913 if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001914 // It's already marked.
1915 return from_ref;
1916 }
1917 mirror::Object* to_ref;
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08001918 if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001919 to_ref = GetFwdPtr(from_ref);
1920 DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) ||
1921 heap_->non_moving_space_->HasAddress(to_ref))
1922 << "from_ref=" << from_ref << " to_ref=" << to_ref;
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08001923 } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001924 if (region_space_bitmap_->Test(from_ref)) {
1925 to_ref = from_ref;
1926 } else {
1927 to_ref = nullptr;
1928 }
1929 } else {
1930 // from_ref is in a non-moving space.
1931 if (immune_region_.ContainsObject(from_ref)) {
1932 accounting::ContinuousSpaceBitmap* cc_bitmap =
1933 cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
1934 DCHECK(cc_bitmap != nullptr)
1935 << "An immune space object must have a bitmap";
1936 if (kIsDebugBuild) {
1937 DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
1938 << "Immune space object must be already marked";
1939 }
1940 if (cc_bitmap->Test(from_ref)) {
1941 // Already marked.
1942 to_ref = from_ref;
1943 } else {
1944 // Newly marked.
1945 to_ref = nullptr;
1946 }
1947 } else {
1948 // Non-immune non-moving space. Use the mark bitmap.
1949 accounting::ContinuousSpaceBitmap* mark_bitmap =
1950 heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
1951 accounting::LargeObjectBitmap* los_bitmap =
1952 heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
1953 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1954 bool is_los = mark_bitmap == nullptr;
1955 if (!is_los && mark_bitmap->Test(from_ref)) {
1956 // Already marked.
1957 to_ref = from_ref;
1958 } else if (is_los && los_bitmap->Test(from_ref)) {
1959 // Already marked in LOS.
1960 to_ref = from_ref;
1961 } else {
1962 // Not marked.
1963 if (IsOnAllocStack(from_ref)) {
1964 // If on the allocation stack, it's considered marked.
1965 to_ref = from_ref;
1966 } else {
1967 // Not marked.
1968 to_ref = nullptr;
1969 }
1970 }
1971 }
1972 }
1973 return to_ref;
1974}
1975
1976bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
1977 QuasiAtomic::ThreadFenceAcquire();
1978 accounting::ObjectStack* alloc_stack = GetAllocationStack();
Mathieu Chartiercb535da2015-01-23 13:50:03 -08001979 return alloc_stack->Contains(ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001980}
1981
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001982mirror::Object* ConcurrentCopying::MarkNonMoving(mirror::Object* ref) {
1983 // ref is in a non-moving space (from_ref == to_ref).
1984 DCHECK(!region_space_->HasAddress(ref)) << ref;
1985 if (immune_region_.ContainsObject(ref)) {
1986 accounting::ContinuousSpaceBitmap* cc_bitmap =
1987 cc_heap_bitmap_->GetContinuousSpaceBitmap(ref);
1988 DCHECK(cc_bitmap != nullptr)
1989 << "An immune space object must have a bitmap";
1990 if (kIsDebugBuild) {
1991 DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(ref)->Test(ref))
1992 << "Immune space object must be already marked";
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001993 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001994 // This may or may not succeed, which is ok.
1995 if (kUseBakerReadBarrier) {
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001996 ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001997 }
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001998 if (cc_bitmap->AtomicTestAndSet(ref)) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001999 // Already marked.
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002000 } else {
2001 // Newly marked.
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002002 if (kUseBakerReadBarrier) {
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07002003 DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002004 }
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07002005 PushOntoMarkStack(ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002006 }
2007 } else {
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07002008 // Use the mark bitmap.
2009 accounting::ContinuousSpaceBitmap* mark_bitmap =
2010 heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
2011 accounting::LargeObjectBitmap* los_bitmap =
2012 heap_mark_bitmap_->GetLargeObjectBitmap(ref);
2013 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
2014 bool is_los = mark_bitmap == nullptr;
2015 if (!is_los && mark_bitmap->Test(ref)) {
2016 // Already marked.
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002017 if (kUseBakerReadBarrier) {
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07002018 DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
2019 ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002020 }
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07002021 } else if (is_los && los_bitmap->Test(ref)) {
2022 // Already marked in LOS.
2023 if (kUseBakerReadBarrier) {
2024 DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
2025 ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002026 }
2027 } else {
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07002028 // Not marked.
2029 if (IsOnAllocStack(ref)) {
2030 // If it's on the allocation stack, it's considered marked. Keep it white.
2031 // Objects on the allocation stack need not be marked.
2032 if (!is_los) {
2033 DCHECK(!mark_bitmap->Test(ref));
2034 } else {
2035 DCHECK(!los_bitmap->Test(ref));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002036 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002037 if (kUseBakerReadBarrier) {
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07002038 DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr());
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002039 }
2040 } else {
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07002041 // Not marked or on the allocation stack. Try to mark it.
2042 // This may or may not succeed, which is ok.
2043 if (kUseBakerReadBarrier) {
2044 ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
2045 }
2046 if (!is_los && mark_bitmap->AtomicTestAndSet(ref)) {
2047 // Already marked.
2048 } else if (is_los && los_bitmap->AtomicTestAndSet(ref)) {
2049 // Already marked in LOS.
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002050 } else {
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07002051 // Newly marked.
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002052 if (kUseBakerReadBarrier) {
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07002053 DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002054 }
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07002055 PushOntoMarkStack(ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002056 }
2057 }
2058 }
2059 }
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07002060 return ref;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002061}
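// A small sketch of the AtomicTestAndSet() idiom used throughout MarkNonMoving():
// setting a mark bit with fetch_or reports whether the bit was already set, so exactly
// one thread observes "newly marked" and pushes the object. Standard C++ only; this
// AtomicBitmap is an illustrative stand-in for the space/LOS mark bitmaps.
#if 0  // Illustrative sketch, not compiled.
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <vector>

class AtomicBitmap {
 public:
  explicit AtomicBitmap(size_t num_bits) : words_((num_bits + 63) / 64) {}

  // Returns true if the bit was already set (i.e. the object was already marked).
  bool TestAndSet(size_t bit_index) {
    std::atomic<uint64_t>& word = words_[bit_index / 64];
    const uint64_t mask = uint64_t{1} << (bit_index % 64);
    return (word.fetch_or(mask, std::memory_order_relaxed) & mask) != 0;
  }

  bool Test(size_t bit_index) const {
    const uint64_t word = words_[bit_index / 64].load(std::memory_order_relaxed);
    return (word & (uint64_t{1} << (bit_index % 64))) != 0;
  }

 private:
  std::vector<std::atomic<uint64_t>> words_;
};
#endif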
2062
2063void ConcurrentCopying::FinishPhase() {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07002064 {
2065 MutexLock mu(Thread::Current(), mark_stack_lock_);
2066 CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
2067 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002068 region_space_ = nullptr;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002069 {
2070 MutexLock mu(Thread::Current(), skipped_blocks_lock_);
2071 skipped_blocks_map_.clear();
2072 }
2073 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
2074 heap_->ClearMarkedObjects();
2075}
2076
Mathieu Chartier97509952015-07-13 14:35:43 -07002077bool ConcurrentCopying::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002078 mirror::Object* from_ref = field->AsMirrorPtr();
Mathieu Chartier97509952015-07-13 14:35:43 -07002079 mirror::Object* to_ref = IsMarked(from_ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002080 if (to_ref == nullptr) {
2081 return false;
2082 }
2083 if (from_ref != to_ref) {
2084 QuasiAtomic::ThreadFenceRelease();
2085 field->Assign(to_ref);
2086 QuasiAtomic::ThreadFenceSequentiallyConsistent();
2087 }
2088 return true;
2089}
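// A minimal sketch of the publication pattern above: a release fence before the store
// makes the copied object contents visible no later than its new address, and the
// trailing fence keeps the update ordered with the rest of reference processing.
// Standard C++ only; Payload and g_field are illustrative, not ART types.
#if 0  // Illustrative sketch, not compiled.
#include <atomic>

struct Payload { int data[4]; };

std::atomic<Payload*> g_field{nullptr};

void PublishMovedRef(Payload* to_ref) {
  std::atomic_thread_fence(std::memory_order_release);  // cf. ThreadFenceRelease()
  g_field.store(to_ref, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);  // cf. ThreadFenceSequentiallyConsistent()
}
#endif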
2090
Mathieu Chartier97509952015-07-13 14:35:43 -07002091mirror::Object* ConcurrentCopying::MarkObject(mirror::Object* from_ref) {
2092 return Mark(from_ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002093}
2094
2095void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
Mathieu Chartier97509952015-07-13 14:35:43 -07002096 heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002097}
2098
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07002099void ConcurrentCopying::ProcessReferences(Thread* self) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002100 TimingLogger::ScopedTiming split("ProcessReferences", GetTimings());
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07002101 // We don't really need to lock the heap bitmap lock as we use CAS to mark in bitmaps.
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002102 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
2103 GetHeap()->GetReferenceProcessor()->ProcessReferences(
Mathieu Chartier97509952015-07-13 14:35:43 -07002104 true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002105}
2106
2107void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
2108 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
2109 region_space_->RevokeAllThreadLocalBuffers();
2110}
2111
Hiroshi Yamauchid5307ec2014-03-27 21:07:51 -07002112} // namespace collector
2113} // namespace gc
2114} // namespace art