/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "concurrent_copying.h"

#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/space.h"
#include "intern_table.h"
#include "mirror/art_field-inl.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"

namespace art {
namespace gc {
namespace collector {

ConcurrentCopying::ConcurrentCopying(Heap* heap, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       "concurrent copying + mark sweep"),
      region_space_(nullptr), gc_barrier_(new Barrier(0)), mark_queue_(2 * MB),
      is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
      heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0),
      skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
      rb_table_(heap_->GetReadBarrierTable()),
      force_evacuate_all_(false) {
  static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
                "The region space size and the read barrier table region size must match");
  cc_heap_bitmap_.reset(new accounting::HeapBitmap(heap));
  {
    Thread* self = Thread::Current();
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Cache this so that we won't have to lock heap_bitmap_lock_ in
    // Mark(), which could otherwise cause a nested lock on
    // heap_bitmap_lock_ when a read barrier is triggered during GC,
    // or a lock order violation (class_linker_lock_ and
    // heap_bitmap_lock_).
    heap_mark_bitmap_ = heap->GetMarkBitmap();
  }
}

ConcurrentCopying::~ConcurrentCopying() {
}

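// Top-level driver of a GC cycle. InitializePhase, MarkingPhase and
// ReclaimPhase run concurrently with mutators (under a shared mutator
// lock); FlipThreadRoots and the optional from-space verification
// below run during stop-the-world pauses.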
void ConcurrentCopying::RunPhases() {
  CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier);
  CHECK(!is_active_);
  is_active_ = true;
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    InitializePhase();
  }
  FlipThreadRoots();
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    MarkingPhase();
  }
  // Verify no from space refs. This causes a pause.
  if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) {
    TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
    ScopedPause pause(this);
    CheckEmptyMarkQueue();
    if (kVerboseMode) {
      LOG(INFO) << "Verifying no from-space refs";
    }
    VerifyNoFromSpaceReferences();
    if (kVerboseMode) {
      LOG(INFO) << "Done verifying no from-space refs";
    }
    CheckEmptyMarkQueue();
  }
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  FinishPhase();
  CHECK(is_active_);
  is_active_ = false;
}

void ConcurrentCopying::BindBitmaps() {
  Thread* self = Thread::Current();
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
        || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      CHECK(space->IsZygoteSpace() || space->IsImageSpace());
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
      const char* bitmap_name = space->IsImageSpace() ? "cc image space bitmap" :
          "cc zygote space bitmap";
      // TODO: try avoiding using bitmaps for image/zygote to save space.
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create(bitmap_name, space->Begin(), space->Capacity());
      cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
      cc_bitmaps_.push_back(bitmap);
    } else if (space == region_space_) {
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create("cc region space bitmap",
                                                    space->Begin(), space->Capacity());
      cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
      cc_bitmaps_.push_back(bitmap);
      region_space_bitmap_ = bitmap;
    }
  }
}

void ConcurrentCopying::InitializePhase() {
  TimingLogger::ScopedTiming split("InitializePhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC InitializePhase";
    LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-"
              << reinterpret_cast<void*>(region_space_->Limit());
  }
  CHECK(mark_queue_.IsEmpty());
  immune_region_.Reset();
  bytes_moved_.StoreRelaxed(0);
  objects_moved_.StoreRelaxed(0);
  if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
      GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
      GetCurrentIteration()->GetClearSoftReferences()) {
    force_evacuate_all_ = true;
  } else {
    force_evacuate_all_ = false;
  }
  BindBitmaps();
  if (kVerboseMode) {
    LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_;
    LOG(INFO) << "Immune region: " << immune_region_.Begin() << "-" << immune_region_.End();
    LOG(INFO) << "GC end of InitializePhase";
  }
}

// Used to switch the thread roots of a thread from from-space refs to to-space refs.
class ThreadFlipVisitor : public Closure {
 public:
  explicit ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
      : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
  }

  virtual void Run(Thread* thread) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    if (use_tlab_ && thread->HasTlab()) {
      if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
        // This must come before the revoke.
        size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
        reinterpret_cast<Atomic<size_t>*>(&concurrent_copying_->from_space_num_objects_at_first_pause_)->
            FetchAndAddSequentiallyConsistent(thread_local_objects);
      } else {
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
      }
    }
    if (kUseThreadLocalAllocationStack) {
      thread->RevokeThreadLocalAllocationStack();
    }
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    thread->VisitRoots(concurrent_copying_);
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool use_tlab_;
};

// Called back from Runtime::FlipThreadRoots() during a pause.
class FlipCallback : public Closure {
 public:
  explicit FlipCallback(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ConcurrentCopying* cc = concurrent_copying_;
    TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self);
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
    cc->SwapStacks(self);
    if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
      cc->RecordLiveStackFreezeSize(self);
      cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated();
      cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
    }
    cc->is_marking_ = true;
    if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
      CHECK(Runtime::Current()->IsAotCompiler());
      TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
      Runtime::Current()->VisitTransactionRoots(cc);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Switch thread roots from from-space refs to to-space refs. Forward/mark the thread roots.
void ConcurrentCopying::FlipThreadRoots() {
  TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
  }
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  gc_barrier_->Init(self, 0);
  ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
  FlipCallback flip_callback(this);
  size_t barrier_count = Runtime::Current()->FlipThreadRoots(
      &thread_flip_visitor, &flip_callback, this);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  is_asserting_to_space_invariant_ = true;
  QuasiAtomic::ThreadFenceForConstructor();
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
    LOG(INFO) << "GC end of FlipThreadRoots";
  }
}

void ConcurrentCopying::SwapStacks(Thread* self) {
  heap_->SwapStacks(self);
}

void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
}

// Used to visit objects in the immune spaces.
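// With the Baker read barrier, an immune object is marked by racily
// setting its read barrier pointer from white to gray and setting its
// bit in the cc bitmap; newly marked objects are pushed onto the mark
// stack so that their fields get scanned as well.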
class ConcurrentCopyingImmuneSpaceObjVisitor {
 public:
  explicit ConcurrentCopyingImmuneSpaceObjVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}

  void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    DCHECK(obj != nullptr);
    DCHECK(collector_->immune_region_.ContainsObject(obj));
    accounting::ContinuousSpaceBitmap* cc_bitmap =
        collector_->cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
    DCHECK(cc_bitmap != nullptr)
        << "An immune space object must have a bitmap";
    if (kIsDebugBuild) {
      DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj))
          << "Immune space object must be already marked";
    }
    // This may or may not succeed, which is ok.
    if (kUseBakerReadBarrier) {
      obj->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
    }
    if (cc_bitmap->AtomicTestAndSet(obj)) {
      // Already marked. Do nothing.
    } else {
      // Newly marked. Set the gray bit and push it onto the mark stack.
      CHECK(!kUseBakerReadBarrier || obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      collector_->PushOntoMarkStack<true>(obj);
    }
  }

 private:
  ConcurrentCopying* collector_;
};

class EmptyCheckpoint : public Closure {
 public:
  explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    if (thread->GetState() == kRunnable) {
      concurrent_copying_->GetBarrier().Pass(self);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Concurrently mark roots that are guarded by read barriers and process the mark stack.
void ConcurrentCopying::MarkingPhase() {
  TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC MarkingPhase";
  }
  {
    // Mark the image root. The WB-based collectors do not need to
    // scan the image objects from roots by relying on the card table,
    // but it's necessary for the RB to-space invariant to hold.
    TimingLogger::ScopedTiming split1("VisitImageRoots", GetTimings());
    gc::space::ImageSpace* image = heap_->GetImageSpace();
    if (image != nullptr) {
      mirror::ObjectArray<mirror::Object>* image_root = image->GetImageHeader().GetImageRoots();
      mirror::Object* marked_image_root = Mark(image_root);
      CHECK_EQ(image_root, marked_image_root) << "An image object does not move";
      if (ReadBarrier::kEnableToSpaceInvariantChecks) {
        AssertToSpaceInvariant(nullptr, MemberOffset(0), marked_image_root);
      }
    }
  }
  {
    TimingLogger::ScopedTiming split2("VisitConstantRoots", GetTimings());
    Runtime::Current()->VisitConstantRoots(this);
  }
  {
    TimingLogger::ScopedTiming split3("VisitInternTableRoots", GetTimings());
    Runtime::Current()->GetInternTable()->VisitRoots(this, kVisitRootFlagAllRoots);
  }
  {
    TimingLogger::ScopedTiming split4("VisitClassLinkerRoots", GetTimings());
    Runtime::Current()->GetClassLinker()->VisitRoots(this, kVisitRootFlagAllRoots);
  }
  {
    // TODO: don't visit the transaction roots if it's not active.
    TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
    Runtime::Current()->VisitNonThreadRoots(this);
  }

  // Immune spaces.
  for (auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
      accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
      ConcurrentCopyingImmuneSpaceObjVisitor visitor(this);
      live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                    reinterpret_cast<uintptr_t>(space->Limit()),
                                    visitor);
    }
  }

  Thread* self = Thread::Current();
  {
    TimingLogger::ScopedTiming split6("ProcessMarkStack", GetTimings());
    // Process the mark stack and issue an empty check point. If the
    // mark stack is still empty after the check point, we're
    // done. Otherwise, repeat.
    ProcessMarkStack();
    size_t count = 0;
    while (!ProcessMarkStack()) {
      ++count;
      if (kVerboseMode) {
        LOG(INFO) << "Issue an empty check point. " << count;
      }
      IssueEmptyCheckpoint();
    }
    // Need to ensure the mark stack is empty before reference
    // processing to get rid of non-reference gray objects.
    CheckEmptyMarkQueue();
    // Enable the GetReference slow path and disallow access to the system weaks.
    GetHeap()->GetReferenceProcessor()->EnableSlowPath();
    Runtime::Current()->DisallowNewSystemWeaks();
    QuasiAtomic::ThreadFenceForConstructor();
    // Lock-unlock the system weak locks so that there's no thread in
    // the middle of accessing system weaks.
    Runtime::Current()->EnsureNewSystemWeaksDisallowed();
    // Note: Do not issue a checkpoint from here to the
    // SweepSystemWeaks call or else a deadlock due to
    // WaitHoldingLocks() would occur.
    if (kVerboseMode) {
      LOG(INFO) << "Enabled the ref proc slow path & disabled access to system weaks.";
      LOG(INFO) << "ProcessReferences";
    }
    ProcessReferences(self, true);
    CheckEmptyMarkQueue();
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks";
    }
    SweepSystemWeaks(self);
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks done";
    }
    // Because hash_set::Erase() can call the hash function for
    // arbitrary elements in the weak intern table in
    // InternTable::Table::SweepWeaks(), the above SweepSystemWeaks()
    // call may have marked some objects (strings) alive. So process
    // the mark stack here once again.
    ProcessMarkStack();
    CheckEmptyMarkQueue();
    // Disable marking.
    if (kUseTableLookupReadBarrier) {
      heap_->rb_table_->ClearAll();
      DCHECK(heap_->rb_table_->IsAllCleared());
    }
    is_mark_queue_push_disallowed_.StoreSequentiallyConsistent(1);
    is_marking_ = false;
    if (kVerboseMode) {
      LOG(INFO) << "AllowNewSystemWeaks";
    }
    Runtime::Current()->AllowNewSystemWeaks();
    CheckEmptyMarkQueue();
  }

  if (kVerboseMode) {
    LOG(INFO) << "GC end of MarkingPhase";
  }
}

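// Runs an empty checkpoint on all mutator threads. Once every running
// thread has passed the checkpoint, any mark-stack pushes its read
// barriers performed beforehand are visible to the GC, so a mark
// stack that is still empty afterwards means marking has reached a
// fixed point.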
void ConcurrentCopying::IssueEmptyCheckpoint() {
  Thread* self = Thread::Current();
  EmptyCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the
  // checkpoint functions have already finished, there is no need to
  // release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

mirror::Object* ConcurrentCopying::PopOffMarkStack() {
  return mark_queue_.Dequeue();
}

template<bool kThreadSafe>
void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
  CHECK_EQ(is_mark_queue_push_disallowed_.LoadRelaxed(), 0)
      << " " << to_ref << " " << PrettyTypeOf(to_ref);
  if (kThreadSafe) {
    CHECK(mark_queue_.Enqueue(to_ref)) << "Mark queue overflow";
  } else {
    CHECK(mark_queue_.EnqueueThreadUnsafe(to_ref)) << "Mark queue overflow";
  }
}

accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() {
  return heap_->allocation_stack_.get();
}

accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
  return heap_->live_stack_.get();
}

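// The forwarding address for a copied object is installed in the
// from-space object's lock word by Copy(); a lock word in the
// kForwardingAddress state encodes the to-space address.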
inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  LockWord lw = from_ref->GetLockWord(false);
  if (lw.GetState() == LockWord::kForwardingAddress) {
    mirror::Object* fwd_ptr = reinterpret_cast<mirror::Object*>(lw.ForwardingAddress());
    CHECK(fwd_ptr != nullptr);
    return fwd_ptr;
  } else {
    return nullptr;
  }
}

// The following visitors are used to verify that there are no
// references to the from-space left after marking.
class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    if (kUseBakerReadBarrier) {
      if (collector_->RegionSpace()->IsInToSpace(ref)) {
        CHECK(ref->GetReadBarrierPointer() == nullptr)
            << "To-space ref " << ref << " " << PrettyTypeOf(ref)
            << " has non-white rb_ptr " << ref->GetReadBarrierPointer();
      } else {
        CHECK(ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
              (ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
               collector_->IsOnAllocStack(ref)))
            << "Non-moving/unevac from space ref " << ref << " " << PrettyTypeOf(ref)
            << " has non-black rb_ptr " << ref->GetReadBarrierPointer()
            << " but isn't on the alloc stack (and has white rb_ptr)."
            << " Is it in the non-moving space="
            << (collector_->GetHeap()->GetNonMovingSpace()->HasAddress(ref));
      }
    }
  }

  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK(root != nullptr);
    operator()(root);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    this->operator()(ref, mirror::Reference::ReferentOffset(), false);
  }

 private:
  ConcurrentCopying* collector_;
};

class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void *arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor visitor(collector);
    obj->VisitReferences<true>(visitor, visitor);
    if (kUseBakerReadBarrier) {
      if (collector->RegionSpace()->IsInToSpace(obj)) {
        CHECK(obj->GetReadBarrierPointer() == nullptr)
            << "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer();
      } else {
        CHECK(obj->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
              (obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
               collector->IsOnAllocStack(obj)))
            << "Non-moving space/unevac from space ref " << obj << " " << PrettyTypeOf(obj)
            << " has non-black rb_ptr " << obj->GetReadBarrierPointer()
            << " but isn't on the alloc stack (and has white rb_ptr). Is it in the non-moving space="
            << (collector->GetHeap()->GetNonMovingSpace()->HasAddress(obj));
      }
    }
  }

 private:
  ConcurrentCopying* const collector_;
};

// Verify that there are no from-space references left after the marking phase.
void ConcurrentCopying::VerifyNoFromSpaceReferences() {
  Thread* self = Thread::Current();
  DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
  ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor visitor(this);
  // Roots.
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
    Runtime::Current()->VisitRoots(&ref_visitor);
  }
  // The to-space.
  region_space_->WalkToSpace(ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor::ObjectCallback,
                             this);
  // Non-moving spaces.
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->GetMarkBitmap()->Visit(visitor);
  }
  // The alloc stack.
  {
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
    for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
        it < end; ++it) {
      mirror::Object* const obj = it->AsMirrorPtr();
      if (obj != nullptr && obj->GetClass() != nullptr) {
        // TODO: need to call this only if obj is alive?
        ref_visitor(obj);
        visitor(obj);
      }
    }
  }
  // TODO: LOS. But only refs in LOS are classes.
}

// The following visitors are used to assert the to-space invariant.
class ConcurrentCopyingAssertToSpaceInvariantRefsVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
  }
  static void RootCallback(mirror::Object** root, void *arg, const RootInfo& /*root_info*/)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector);
    DCHECK(root != nullptr);
    visitor(*root);
  }

 private:
  ConcurrentCopying* collector_;
};

class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* /* ref */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
  }

 private:
  ConcurrentCopying* collector_;
};

class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void *arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
    ConcurrentCopyingAssertToSpaceInvariantFieldVisitor visitor(collector);
    obj->VisitReferences<true>(visitor, visitor);
  }

 private:
  ConcurrentCopying* collector_;
};

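// Drains the mark stack: each popped (gray) object has its reference
// fields scanned and is then flipped to white (to-space) or black
// (non-moving/unevac from-space). Returns true if the stack was
// already empty, i.e. no marking work remained.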
bool ConcurrentCopying::ProcessMarkStack() {
  if (kVerboseMode) {
    LOG(INFO) << "ProcessMarkStack. ";
  }
  size_t count = 0;
  mirror::Object* to_ref;
  while ((to_ref = PopOffMarkStack()) != nullptr) {
    ++count;
    DCHECK(!region_space_->IsInFromSpace(to_ref));
    if (kUseBakerReadBarrier) {
      DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
          << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
          << " is_marked=" << IsMarked(to_ref);
    }
    // Scan ref fields.
    Scan(to_ref);
    // Mark the gray ref as white or black.
    if (kUseBakerReadBarrier) {
      DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
          << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
          << " is_marked=" << IsMarked(to_ref);
    }
    if (to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
        to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr &&
        !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())) {
      // Leave References gray so that GetReferent() will trigger RB.
      CHECK(to_ref->AsReference()->IsEnqueued()) << "Left unenqueued ref gray " << to_ref;
    } else {
      if (kUseBakerReadBarrier) {
        if (region_space_->IsInToSpace(to_ref)) {
          // If to-space, change from gray to white.
          bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                             ReadBarrier::WhitePtr());
          CHECK(success) << "Must succeed as we won the race.";
          CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
        } else {
          // If non-moving space/unevac from space, change from gray
          // to black. We can't change gray to white because it's not
          // safe to use CAS if two threads change values in opposite
          // directions (A->B and B->A). So, we change it to black to
          // indicate non-moving objects that have been marked
          // through. Note we'd need to change from black to white
          // later (concurrently).
          bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                             ReadBarrier::BlackPtr());
          CHECK(success) << "Must succeed as we won the race.";
          CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
        }
      }
    }
    if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
      ConcurrentCopyingAssertToSpaceInvariantObjectVisitor visitor(this);
      visitor(to_ref);
    }
  }
  // Return true if the stack was empty.
  return count == 0;
}

void ConcurrentCopying::CheckEmptyMarkQueue() {
  if (!mark_queue_.IsEmpty()) {
    while (!mark_queue_.IsEmpty()) {
      mirror::Object* obj = mark_queue_.Dequeue();
      if (kUseBakerReadBarrier) {
        mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
        LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj) << " rb_ptr=" << rb_ptr
                  << " is_marked=" << IsMarked(obj);
      } else {
        LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj)
                  << " is_marked=" << IsMarked(obj);
      }
    }
    LOG(FATAL) << "mark queue is not empty";
  }
}

void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
  TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
}

void ConcurrentCopying::Sweep(bool swap_bitmaps) {
  {
    TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    if (kEnableFromSpaceAccountingCheck) {
      CHECK_GE(live_stack_freeze_size_, live_stack->Size());
    }
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
  }
  CHECK(mark_queue_.IsEmpty());
  TimingLogger::ScopedTiming split("Sweep", GetTimings());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (space == region_space_ || immune_region_.ContainsSpace(space)) {
        continue;
      }
      TimingLogger::ScopedTiming split2(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
      RecordFree(alloc_space->Sweep(swap_bitmaps));
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) {
  TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
  RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
}

class ConcurrentCopyingClearBlackPtrsVisitor {
 public:
  explicit ConcurrentCopyingClearBlackPtrsVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}
#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
  NO_RETURN
#endif
  void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    DCHECK(obj != nullptr);
    DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj)) << obj;
    DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << obj;
    obj->SetReadBarrierPointer(ReadBarrier::WhitePtr());
    DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
  }

 private:
  ConcurrentCopying* const collector_;
};

// Clear the black ptrs in non-moving objects back to white.
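// Marked-through non-moving objects were left black by
// ProcessMarkStack(); the Baker read barrier expects white outside of
// marking, so they are reset here before the next cycle.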
void ConcurrentCopying::ClearBlackPtrs() {
  CHECK(kUseBakerReadBarrier);
  TimingLogger::ScopedTiming split("ClearBlackPtrs", GetTimings());
  ConcurrentCopyingClearBlackPtrsVisitor visitor(this);
  for (auto& space : heap_->GetContinuousSpaces()) {
    if (space == region_space_) {
      continue;
    }
    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (kVerboseMode) {
      LOG(INFO) << "ClearBlackPtrs: " << *space << " bitmap: " << *mark_bitmap;
    }
    mark_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                  reinterpret_cast<uintptr_t>(space->Limit()),
                                  visitor);
  }
  space::LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace();
  large_object_space->GetMarkBitmap()->VisitMarkedRange(
      reinterpret_cast<uintptr_t>(large_object_space->Begin()),
      reinterpret_cast<uintptr_t>(large_object_space->End()),
      visitor);
  // Objects on the allocation stack?
  if (ReadBarrier::kEnableReadBarrierInvariantChecks || kIsDebugBuild) {
    size_t count = GetAllocationStack()->Size();
    auto* it = GetAllocationStack()->Begin();
    auto* end = GetAllocationStack()->End();
    for (size_t i = 0; i < count; ++i, ++it) {
      CHECK_LT(it, end);
      mirror::Object* obj = it->AsMirrorPtr();
      if (obj != nullptr) {
        // Must have been cleared above.
        CHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
      }
    }
  }
}

void ConcurrentCopying::ReclaimPhase() {
  TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC ReclaimPhase";
  }
  Thread* self = Thread::Current();

  {
    // Double-check that the mark stack is empty.
    // Note: need to set this after VerifyNoFromSpaceRef().
    is_asserting_to_space_invariant_ = false;
    QuasiAtomic::ThreadFenceForConstructor();
    if (kVerboseMode) {
      LOG(INFO) << "Issue an empty check point. ";
    }
    IssueEmptyCheckpoint();
    // Disable the check.
    is_mark_queue_push_disallowed_.StoreSequentiallyConsistent(0);
    CheckEmptyMarkQueue();
  }

  {
    // Record freed objects.
    TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
    // Don't include thread-locals that are in the to-space.
    uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
    uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
    uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
    uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
    uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
    uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
    if (kEnableFromSpaceAccountingCheck) {
      CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
      CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
    }
    CHECK_LE(to_objects, from_objects);
    CHECK_LE(to_bytes, from_bytes);
    int64_t freed_bytes = from_bytes - to_bytes;
    int64_t freed_objects = from_objects - to_objects;
    if (kVerboseMode) {
      LOG(INFO) << "RecordFree:"
                << " from_bytes=" << from_bytes << " from_objects=" << from_objects
                << " unevac_from_bytes=" << unevac_from_bytes << " unevac_from_objects=" << unevac_from_objects
                << " to_bytes=" << to_bytes << " to_objects=" << to_objects
                << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects
                << " from_space size=" << region_space_->FromSpaceSize()
                << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
                << " to_space size=" << region_space_->ToSpaceSize();
      LOG(INFO) << "(before) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
    }
    RecordFree(ObjectBytePair(freed_objects, freed_bytes));
    if (kVerboseMode) {
      LOG(INFO) << "(after) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
    }
  }

  {
    TimingLogger::ScopedTiming split3("ComputeUnevacFromSpaceLiveRatio", GetTimings());
    ComputeUnevacFromSpaceLiveRatio();
  }

  {
    TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
    region_space_->ClearFromSpace();
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    if (kUseBakerReadBarrier) {
      ClearBlackPtrs();
    }
    Sweep(false);
    SwapBitmaps();
    heap_->UnBindBitmaps();

    // Remove bitmaps for the immune spaces.
    while (!cc_bitmaps_.empty()) {
      accounting::ContinuousSpaceBitmap* cc_bitmap = cc_bitmaps_.back();
      cc_heap_bitmap_->RemoveContinuousSpaceBitmap(cc_bitmap);
      delete cc_bitmap;
      cc_bitmaps_.pop_back();
    }
    region_space_bitmap_ = nullptr;
  }

  if (kVerboseMode) {
    LOG(INFO) << "GC end of ReclaimPhase";
  }
}

class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor {
 public:
  explicit ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}
  void operator()(mirror::Object* ref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    DCHECK(ref != nullptr);
    DCHECK(collector_->region_space_bitmap_->Test(ref)) << ref;
    DCHECK(collector_->region_space_->IsInUnevacFromSpace(ref)) << ref;
    if (kUseBakerReadBarrier) {
      DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << ref;
      // Clear the black ptr.
      ref->SetReadBarrierPointer(ReadBarrier::WhitePtr());
    }
    size_t obj_size = ref->SizeOf();
    size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
    collector_->region_space_->AddLiveBytes(ref, alloc_size);
  }

 private:
  ConcurrentCopying* collector_;
};

// Compute how much live data is left in the unevac from-space regions.
void ConcurrentCopying::ComputeUnevacFromSpaceLiveRatio() {
  region_space_->AssertAllRegionLiveBytesZeroOrCleared();
  ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor visitor(this);
  region_space_bitmap_->VisitMarkedRange(reinterpret_cast<uintptr_t>(region_space_->Begin()),
                                         reinterpret_cast<uintptr_t>(region_space_->Limit()),
                                         visitor);
}

// Assert the to-space invariant.
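// The invariant: once marking is on, no reference visible to mutators
// may point into the from-space; a ref must be in the to-space, or be
// marked (or on the alloc stack) if in the unevac/non-moving spaces.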
void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                               mirror::Object* ref) {
  CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
  if (is_asserting_to_space_invariant_) {
    if (region_space_->IsInToSpace(ref)) {
      // OK.
      return;
    } else if (region_space_->IsInUnevacFromSpace(ref)) {
      CHECK(region_space_bitmap_->Test(ref)) << ref;
    } else if (region_space_->IsInFromSpace(ref)) {
      // Not OK. Do extra logging.
      if (obj != nullptr) {
        if (kUseBakerReadBarrier) {
          LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj)
                    << " holder rb_ptr=" << obj->GetReadBarrierPointer();
        } else {
          LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj);
        }
        if (region_space_->IsInFromSpace(obj)) {
          LOG(INFO) << "holder is in the from-space.";
        } else if (region_space_->IsInToSpace(obj)) {
          LOG(INFO) << "holder is in the to-space.";
        } else if (region_space_->IsInUnevacFromSpace(obj)) {
          LOG(INFO) << "holder is in the unevac from-space.";
          if (region_space_bitmap_->Test(obj)) {
            LOG(INFO) << "holder is marked in the region space bitmap.";
          } else {
            LOG(INFO) << "holder is not marked in the region space bitmap.";
          }
        } else {
          // In a non-moving space.
          if (immune_region_.ContainsObject(obj)) {
            LOG(INFO) << "holder is in the image or the zygote space.";
            accounting::ContinuousSpaceBitmap* cc_bitmap =
                cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
            CHECK(cc_bitmap != nullptr)
                << "An immune space object must have a bitmap.";
            if (cc_bitmap->Test(obj)) {
              LOG(INFO) << "holder is marked in the bit map.";
            } else {
              LOG(INFO) << "holder is NOT marked in the bit map.";
            }
          } else {
            LOG(INFO) << "holder is in a non-moving (or main) space.";
            accounting::ContinuousSpaceBitmap* mark_bitmap =
                heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
            accounting::LargeObjectBitmap* los_bitmap =
                heap_mark_bitmap_->GetLargeObjectBitmap(obj);
            CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
            bool is_los = mark_bitmap == nullptr;
            if (!is_los && mark_bitmap->Test(obj)) {
              LOG(INFO) << "holder is marked in the mark bit map.";
            } else if (is_los && los_bitmap->Test(obj)) {
              LOG(INFO) << "holder is marked in the los bit map.";
            } else {
              // If ref is on the allocation stack, then it is considered
              // mark/alive (but not necessarily on the live stack.)
              if (IsOnAllocStack(obj)) {
                LOG(INFO) << "holder is on the alloc stack.";
              } else {
                LOG(INFO) << "holder is not marked or on the alloc stack.";
              }
            }
          }
        }
        LOG(INFO) << "offset=" << offset.SizeValue();
      }
      CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
    } else {
      // In a non-moving space. Check that the ref is marked.
      if (immune_region_.ContainsObject(ref)) {
        accounting::ContinuousSpaceBitmap* cc_bitmap =
            cc_heap_bitmap_->GetContinuousSpaceBitmap(ref);
        CHECK(cc_bitmap != nullptr)
            << "An immune space ref must have a bitmap. " << ref;
        if (kUseBakerReadBarrier) {
          CHECK(cc_bitmap->Test(ref))
              << "Unmarked immune space ref. obj=" << obj << " rb_ptr="
              << obj->GetReadBarrierPointer() << " ref=" << ref;
        } else {
          CHECK(cc_bitmap->Test(ref))
              << "Unmarked immune space ref. obj=" << obj << " ref=" << ref;
        }
      } else {
        accounting::ContinuousSpaceBitmap* mark_bitmap =
            heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
        accounting::LargeObjectBitmap* los_bitmap =
            heap_mark_bitmap_->GetLargeObjectBitmap(ref);
        CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
        bool is_los = mark_bitmap == nullptr;
        if ((!is_los && mark_bitmap->Test(ref)) ||
            (is_los && los_bitmap->Test(ref))) {
          // OK.
        } else {
          // If ref is on the allocation stack, then it may not be
          // marked live, but considered marked/alive (but not
          // necessarily on the live stack).
          CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. "
                                     << "obj=" << obj << " ref=" << ref;
        }
      }
    }
  }
}

// Used to scan ref fields of an object.
class ConcurrentCopyingRefFieldsVisitor {
 public:
  explicit ConcurrentCopyingRefFieldsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
      const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    collector_->Process(obj, offset);
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  ConcurrentCopying* const collector_;
};

// Scan ref fields of an object.
void ConcurrentCopying::Scan(mirror::Object* to_ref) {
  DCHECK(!region_space_->IsInFromSpace(to_ref));
  ConcurrentCopyingRefFieldsVisitor visitor(this);
  to_ref->VisitReferences<true>(visitor, visitor);
}

// Process a field.
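// The weak CAS below lets the GC and a concurrent mutator write to
// the same field without stomping on each other: the to-space ref is
// installed only while the field still holds the from-space ref that
// was read. A value written by the mutator in the meantime already
// satisfies the to-space invariant, so the field is left alone.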
inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
  mirror::Object* ref =
      obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
  if (ref == nullptr || region_space_->IsInToSpace(ref)) {
    return;
  }
  mirror::Object* to_ref = Mark(ref);
  if (to_ref == ref) {
    return;
  }
  // This may fail if the mutator writes to the field at the same time. But it's ok.
  mirror::Object* expected_ref = ref;
  mirror::Object* new_ref = to_ref;
  do {
    if (expected_ref !=
        obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) {
      // It was updated by the mutator.
      break;
    }
  } while (!obj->CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<false, false, kVerifyNone>(
      offset, expected_ref, new_ref));
}

// Process some roots.
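// These overloads are called back from Runtime::VisitRoots() and
// update each root slot in place, with a CAS so that a concurrent
// update of the same slot by a mutator is never overwritten.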
void ConcurrentCopying::VisitRoots(
    mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    mirror::Object** root = roots[i];
    mirror::Object* ref = *root;
    if (ref == nullptr || region_space_->IsInToSpace(ref)) {
      // Nothing to do for this root; move on to the next one.
      continue;
    }
    mirror::Object* to_ref = Mark(ref);
    if (to_ref == ref) {
      continue;
    }
    Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
    mirror::Object* expected_ref = ref;
    mirror::Object* new_ref = to_ref;
    do {
      if (expected_ref != addr->LoadRelaxed()) {
        // It was updated by the mutator.
        break;
      }
    } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
  }
}

void ConcurrentCopying::VisitRoots(
    mirror::CompressedReference<mirror::Object>** roots, size_t count,
    const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    mirror::CompressedReference<mirror::Object>* root = roots[i];
    mirror::Object* ref = root->AsMirrorPtr();
    if (ref == nullptr || region_space_->IsInToSpace(ref)) {
      // Nothing to do for this root; move on to the next one.
      continue;
    }
    mirror::Object* to_ref = Mark(ref);
    if (to_ref == ref) {
      continue;
    }
    auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
    auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
    auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
    do {
      if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
        // It was updated by the mutator.
        break;
      }
    } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
  }
}

// Fill the given memory block with a dummy object. Used to fill in
// copies of objects that were lost in the race to install the
// forwarding pointer.
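// Filling with dummy objects (int arrays, or java.lang.Object when
// the block is too small for an array header) keeps the to-space
// parsable: every byte range looks like a valid object, which heap
// walks such as WalkToSpace() depend on.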
void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
  CHECK(IsAligned<kObjectAlignment>(byte_size));
  memset(dummy_obj, 0, byte_size);
  mirror::Class* int_array_class = mirror::IntArray::GetArrayClass();
  CHECK(int_array_class != nullptr);
  AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
  size_t component_size = int_array_class->GetComponentSize();
  CHECK_EQ(component_size, sizeof(int32_t));
  size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
  if (data_offset > byte_size) {
    // An int array is too big. Use java.lang.Object.
    mirror::Class* java_lang_Object = WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object);
    AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object);
    CHECK_EQ(byte_size, java_lang_Object->GetObjectSize());
    dummy_obj->SetClass(java_lang_Object);
    CHECK_EQ(byte_size, dummy_obj->SizeOf());
  } else {
    // Use an int array.
    dummy_obj->SetClass(int_array_class);
    CHECK(dummy_obj->IsArrayInstance());
    int32_t length = (byte_size - data_offset) / component_size;
    dummy_obj->AsArray()->SetLength(length);
    CHECK_EQ(dummy_obj->AsArray()->GetLength(), length)
        << "byte_size=" << byte_size << " length=" << length
        << " component_size=" << component_size << " data_offset=" << data_offset;
    CHECK_EQ(byte_size, dummy_obj->SizeOf())
        << "byte_size=" << byte_size << " length=" << length
        << " component_size=" << component_size << " data_offset=" << data_offset;
  }
}

// Reuse the memory blocks that were copies of objects lost in the race.
mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
  // Try to reuse the blocks that were unused due to CAS failures.
  CHECK(IsAligned<space::RegionSpace::kAlignment>(alloc_size));
  Thread* self = Thread::Current();
  size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
  MutexLock mu(self, skipped_blocks_lock_);
  auto it = skipped_blocks_map_.lower_bound(alloc_size);
  if (it == skipped_blocks_map_.end()) {
    // Not found.
    return nullptr;
  }
  {
    size_t byte_size = it->first;
    CHECK_GE(byte_size, alloc_size);
    if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
      // If remainder would be too small for a dummy object, retry with a larger request size.
      it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
      if (it == skipped_blocks_map_.end()) {
        // Not found.
        return nullptr;
      }
      CHECK(IsAligned<space::RegionSpace::kAlignment>(it->first - alloc_size));
      CHECK_GE(it->first - alloc_size, min_object_size)
          << "byte_size=" << byte_size << " it->first=" << it->first << " alloc_size=" << alloc_size;
    }
  }
  // Found a block.
  CHECK(it != skipped_blocks_map_.end());
  size_t byte_size = it->first;
  uint8_t* addr = it->second;
  CHECK_GE(byte_size, alloc_size);
  CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
  CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size));
  if (kVerboseMode) {
    LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
  }
  skipped_blocks_map_.erase(it);
  memset(addr, 0, byte_size);
  if (byte_size > alloc_size) {
    // Return the remainder to the map.
    CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size - alloc_size));
    CHECK_GE(byte_size - alloc_size, min_object_size);
    FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
                        byte_size - alloc_size);
    CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
    skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
  }
  return reinterpret_cast<mirror::Object*>(addr);
}

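// Copies from_ref into the to-space and installs a forwarding pointer
// in from_ref's lock word. Threads race on the lock-word CAS; a loser
// fills its copy with a dummy object and recycles the memory via the
// skipped-blocks map. Falls back to the non-moving space when the
// to-space is exhausted.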
mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  // No read barrier to avoid nested RB that might violate the to-space
  // invariant. Note that from_ref is a from space ref so the SizeOf()
  // call will access the from-space meta objects, but it's ok and necessary.
  size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
  size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
  size_t region_space_bytes_allocated = 0U;
  size_t non_moving_space_bytes_allocated = 0U;
  size_t bytes_allocated = 0U;
  size_t dummy;
  mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
      region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
  bytes_allocated = region_space_bytes_allocated;
  if (to_ref != nullptr) {
    DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
  }
  bool fall_back_to_non_moving = false;
  if (UNLIKELY(to_ref == nullptr)) {
    // Failed to allocate in the region space. Try the skipped blocks.
    to_ref = AllocateInSkippedBlock(region_space_alloc_size);
    if (to_ref != nullptr) {
      // Succeeded to allocate in a skipped block.
      if (heap_->use_tlab_) {
        // This is necessary for the tlab case as it's not accounted in the space.
        region_space_->RecordAlloc(to_ref);
      }
      bytes_allocated = region_space_alloc_size;
    } else {
      // Fall back to the non-moving space.
      fall_back_to_non_moving = true;
      if (kVerboseMode) {
        LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
                  << to_space_bytes_skipped_.LoadSequentiallyConsistent()
                  << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent();
      }
      to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
                                               &non_moving_space_bytes_allocated, nullptr, &dummy);
      CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed";
      bytes_allocated = non_moving_space_bytes_allocated;
      // Mark it in the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
      CHECK(mark_bitmap != nullptr);
      CHECK(!mark_bitmap->AtomicTestAndSet(to_ref));
    }
  }
  DCHECK(to_ref != nullptr);

  // Attempt to install the forward pointer. This is in a loop as the
  // lock word atomic write can fail.
  while (true) {
    // Copy the object. TODO: copy only the lockword in the second iteration and on?
    memcpy(to_ref, from_ref, obj_size);
    // Set the gray ptr.
    if (kUseBakerReadBarrier) {
      to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr());
    }

    LockWord old_lock_word = to_ref->GetLockWord(false);

    if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
      // Lost the race. Another thread (either GC or mutator) stored
      // the forwarding pointer first. Make the lost copy (to_ref)
      // look like a valid but dead (dummy) object and keep it for
      // future reuse.
      FillWithDummyObject(to_ref, bytes_allocated);
      if (!fall_back_to_non_moving) {
        DCHECK(region_space_->IsInToSpace(to_ref));
        if (bytes_allocated > space::RegionSpace::kRegionSize) {
          // Free the large alloc.
          region_space_->FreeLarge(to_ref, bytes_allocated);
        } else {
          // Record the lost copy for later reuse.
          heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated);
          to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated);
          to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1);
          MutexLock mu(Thread::Current(), skipped_blocks_lock_);
          skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
                                                    reinterpret_cast<uint8_t*>(to_ref)));
        }
      } else {
        DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
        DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
        // Free the non-moving-space chunk.
        accounting::ContinuousSpaceBitmap* mark_bitmap =
            heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
        CHECK(mark_bitmap != nullptr);
        CHECK(mark_bitmap->Clear(to_ref));
        heap_->non_moving_space_->Free(Thread::Current(), to_ref);
      }

      // Get the winner's forward ptr.
      mirror::Object* lost_fwd_ptr = to_ref;
      to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress());
      CHECK(to_ref != nullptr);
      CHECK_NE(to_ref, lost_fwd_ptr);
      CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref));
      CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
      return to_ref;
    }

    LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));

    // Try to atomically write the fwd ptr.
    bool success = from_ref->CasLockWordWeakSequentiallyConsistent(old_lock_word, new_lock_word);
    if (LIKELY(success)) {
      // The CAS succeeded.
      objects_moved_.FetchAndAddSequentiallyConsistent(1);
      bytes_moved_.FetchAndAddSequentiallyConsistent(region_space_alloc_size);
      if (LIKELY(!fall_back_to_non_moving)) {
        DCHECK(region_space_->IsInToSpace(to_ref));
      } else {
        DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
        DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
      }
      if (kUseBakerReadBarrier) {
        DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      }
      DCHECK(GetFwdPtr(from_ref) == to_ref);
      CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
      PushOntoMarkStack<true>(to_ref);
      return to_ref;
    } else {
      // The CAS failed. It may have lost the race or may have failed
      // due to monitor/hashcode ops. Either way, retry.
    }
  }
}
1408
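// Returns the post-GC address of from_ref if it is already marked, or
// nullptr if it is not. Unlike Mark(), this never marks or copies. Summary
// of the cases below:
//
//   to-space region         -> from_ref (trivially marked)
//   from-space region       -> forwarding pointer, or nullptr if not copied
//   unevac from-space       -> region_space_bitmap_ test
//   immune space            -> cc_bitmap test
//   other non-moving / LOS  -> mark/LOS bitmap test, then allocation stack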
mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
  DCHECK(from_ref != nullptr);
  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
  if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
    // It's already marked.
    return from_ref;
  }
  mirror::Object* to_ref;
  if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
    to_ref = GetFwdPtr(from_ref);
    DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) ||
           heap_->non_moving_space_->HasAddress(to_ref))
        << "from_ref=" << from_ref << " to_ref=" << to_ref;
  } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
    if (region_space_bitmap_->Test(from_ref)) {
      to_ref = from_ref;
    } else {
      to_ref = nullptr;
    }
  } else {
    // from_ref is in a non-moving space.
    if (immune_region_.ContainsObject(from_ref)) {
      accounting::ContinuousSpaceBitmap* cc_bitmap =
          cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
      DCHECK(cc_bitmap != nullptr)
          << "An immune space object must have a bitmap";
      if (kIsDebugBuild) {
        DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
            << "Immune space object must be already marked";
      }
      if (cc_bitmap->Test(from_ref)) {
        // Already marked.
        to_ref = from_ref;
      } else {
        // Not marked yet.
        to_ref = nullptr;
      }
    } else {
      // Non-immune non-moving space. Use the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
      accounting::LargeObjectBitmap* los_bitmap =
          heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
      bool is_los = mark_bitmap == nullptr;
      if (!is_los && mark_bitmap->Test(from_ref)) {
        // Already marked.
        to_ref = from_ref;
      } else if (is_los && los_bitmap->Test(from_ref)) {
        // Already marked in LOS.
        to_ref = from_ref;
      } else {
        // Not marked in a bitmap.
        if (IsOnAllocStack(from_ref)) {
          // If on the allocation stack, it's considered marked.
          to_ref = from_ref;
        } else {
          // Not marked.
          to_ref = nullptr;
        }
      }
    }
  }
  return to_ref;
}

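// The acquire fence is assumed to pair with the ordering of allocation-stack
// pushes on allocating threads, so that a ref pushed before the GC read it
// from the heap is reliably observed by Contains().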
bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
  QuasiAtomic::ThreadFenceAcquire();
  accounting::ObjectStack* alloc_stack = GetAllocationStack();
  return alloc_stack->Contains(ref);
}

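// Marks from_ref and returns its post-GC address: from-space refs are
// evacuated via Copy(); unevacuated and non-moving refs are marked in place
// via bitmaps. Under the Baker read barrier scheme the read barrier pointer
// transitions roughly white (unvisited) -> gray (marked, not yet scanned) ->
// black (scanned); the AtomicSetReadBarrierPointer() calls below perform the
// white -> gray step.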
mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
  if (from_ref == nullptr) {
    return nullptr;
  }
  DCHECK(heap_->collector_type_ == kCollectorTypeCC);
  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
  if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
    // It's already marked.
    return from_ref;
  }
  mirror::Object* to_ref;
  if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
    to_ref = GetFwdPtr(from_ref);
    if (kUseBakerReadBarrier) {
      DCHECK(to_ref != ReadBarrier::GrayPtr()) << "from_ref=" << from_ref << " to_ref=" << to_ref;
    }
    if (to_ref == nullptr) {
      // It isn't marked yet. Mark it by copying it to the to-space.
      to_ref = Copy(from_ref);
    }
    DCHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
        << "from_ref=" << from_ref << " to_ref=" << to_ref;
  } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
    // This may or may not succeed, which is ok.
    if (kUseBakerReadBarrier) {
      from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
    }
    if (region_space_bitmap_->AtomicTestAndSet(from_ref)) {
      // Already marked.
      to_ref = from_ref;
    } else {
      // Newly marked.
      to_ref = from_ref;
      if (kUseBakerReadBarrier) {
        DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      }
      PushOntoMarkStack<true>(to_ref);
    }
  } else {
    // from_ref is in a non-moving space.
    DCHECK(!region_space_->HasAddress(from_ref)) << from_ref;
    if (immune_region_.ContainsObject(from_ref)) {
      accounting::ContinuousSpaceBitmap* cc_bitmap =
          cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
      DCHECK(cc_bitmap != nullptr)
          << "An immune space object must have a bitmap";
      if (kIsDebugBuild) {
        DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
            << "Immune space object must be already marked";
      }
      // This may or may not succeed, which is ok.
      if (kUseBakerReadBarrier) {
        from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
      }
      if (cc_bitmap->AtomicTestAndSet(from_ref)) {
        // Already marked.
        to_ref = from_ref;
      } else {
        // Newly marked.
        to_ref = from_ref;
        if (kUseBakerReadBarrier) {
          DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
        }
        PushOntoMarkStack<true>(to_ref);
      }
    } else {
      // Use the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
      accounting::LargeObjectBitmap* los_bitmap =
          heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
      bool is_los = mark_bitmap == nullptr;
      if (!is_los && mark_bitmap->Test(from_ref)) {
        // Already marked.
        to_ref = from_ref;
        if (kUseBakerReadBarrier) {
          DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
                 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
        }
      } else if (is_los && los_bitmap->Test(from_ref)) {
        // Already marked in LOS.
        to_ref = from_ref;
        if (kUseBakerReadBarrier) {
          DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
                 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
        }
      } else {
        // Not marked in a bitmap.
        if (IsOnAllocStack(from_ref)) {
          // If it's on the allocation stack, it's considered marked. Keep it white.
          to_ref = from_ref;
          // Objects on the allocation stack need not be marked.
          if (!is_los) {
            DCHECK(!mark_bitmap->Test(to_ref));
          } else {
            DCHECK(!los_bitmap->Test(to_ref));
          }
          if (kUseBakerReadBarrier) {
            DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
          }
        } else {
          // Neither marked nor on the allocation stack. Try to mark it.
          // This may or may not succeed, which is ok.
          if (kUseBakerReadBarrier) {
            from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
          }
          if (!is_los && mark_bitmap->AtomicTestAndSet(from_ref)) {
            // Already marked.
            to_ref = from_ref;
          } else if (is_los && los_bitmap->AtomicTestAndSet(from_ref)) {
            // Already marked in LOS.
            to_ref = from_ref;
          } else {
            // Newly marked.
            to_ref = from_ref;
            if (kUseBakerReadBarrier) {
              DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
            }
            PushOntoMarkStack<true>(to_ref);
          }
        }
      }
    }
  }
  return to_ref;
}

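// A minimal sketch of how a read-barrier slow path is expected to use
// Mark() (a hypothetical helper for illustration, not the actual
// ReadBarrier entry point):
//
//   mirror::Object* BarrierSlowPath(ConcurrentCopying* cc, mirror::Object* ref) {
//     // During marking, heap reads funnel through Mark() so that
//     // from-space refs are forwarded to their to-space copies.
//     return cc->Mark(ref);
//   }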
void ConcurrentCopying::FinishPhase() {
  region_space_ = nullptr;
  CHECK(mark_queue_.IsEmpty());
  mark_queue_.Clear();
  {
    MutexLock mu(Thread::Current(), skipped_blocks_lock_);
    skipped_blocks_map_.clear();
  }
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

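// The static *Callback functions below adapt the C-style function-pointer
// interfaces of the reference processor and weak-reference sweeping to the
// ConcurrentCopying instance passed through 'arg'.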
mirror::Object* ConcurrentCopying::IsMarkedCallback(mirror::Object* from_ref, void* arg) {
  return reinterpret_cast<ConcurrentCopying*>(arg)->IsMarked(from_ref);
}

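// Returns true if the field's referent is marked, updating the field to the
// to-space address if the object moved. The release fence before Assign()
// is there so the copied contents become visible before the new reference
// is published; the trailing fence conservatively orders the store with
// later operations.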
bool ConcurrentCopying::IsHeapReferenceMarkedCallback(
    mirror::HeapReference<mirror::Object>* field, void* arg) {
  mirror::Object* from_ref = field->AsMirrorPtr();
  mirror::Object* to_ref = reinterpret_cast<ConcurrentCopying*>(arg)->IsMarked(from_ref);
  if (to_ref == nullptr) {
    return false;
  }
  if (from_ref != to_ref) {
    QuasiAtomic::ThreadFenceRelease();
    field->Assign(to_ref);
    QuasiAtomic::ThreadFenceSequentiallyConsistent();
  }
  return true;
}

mirror::Object* ConcurrentCopying::MarkCallback(mirror::Object* from_ref, void* arg) {
  return reinterpret_cast<ConcurrentCopying*>(arg)->Mark(from_ref);
}

void ConcurrentCopying::ProcessMarkStackCallback(void* arg) {
  reinterpret_cast<ConcurrentCopying*>(arg)->ProcessMarkStack();
}

void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(
      klass, reference, &IsHeapReferenceMarkedCallback, this);
}

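// Runs java.lang.ref reference processing using the callbacks above:
// referents that IsHeapReferenceMarkedCallback() reports unmarked are
// cleared or enqueued according to their reference type.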
void ConcurrentCopying::ProcessReferences(Thread* self, bool concurrent) {
  TimingLogger::ScopedTiming split("ProcessReferences", GetTimings());
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      concurrent, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
      &IsHeapReferenceMarkedCallback, &MarkCallback, &ProcessMarkStackCallback, this);
}

void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  region_space_->RevokeAllThreadLocalBuffers();
}

}  // namespace collector
}  // namespace gc
}  // namespace art