1/*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "concurrent_copying.h"
18
19#include "gc/accounting/heap_bitmap-inl.h"
20#include "gc/accounting/space_bitmap-inl.h"
21#include "gc/space/image_space.h"
22#include "gc/space/space.h"
23#include "intern_table.h"
24#include "mirror/art_field-inl.h"
25#include "mirror/object-inl.h"
26#include "scoped_thread_state_change.h"
27#include "thread-inl.h"
28#include "thread_list.h"
29#include "well_known_classes.h"
30
31namespace art {
32namespace gc {
33namespace collector {
34
35ConcurrentCopying::ConcurrentCopying(Heap* heap, const std::string& name_prefix)
36 : GarbageCollector(heap,
37 name_prefix + (name_prefix.empty() ? "" : " ") +
38 "concurrent copying + mark sweep"),
39 region_space_(nullptr), gc_barrier_(new Barrier(0)), mark_queue_(2 * MB),
40 is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
41 heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0),
42 skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
43 rb_table_(heap_->GetReadBarrierTable()),
44 force_evacuate_all_(false) {
45 static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
46 "The region space size and the read barrier table region size must match");
47 cc_heap_bitmap_.reset(new accounting::HeapBitmap(heap));
48 {
49 Thread* self = Thread::Current();
50 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
51    // Cache this so that we won't have to lock heap_bitmap_lock_ in
52    // Mark(), which could otherwise cause a nested lock on heap_bitmap_lock_
53    // (when the GC triggers a read barrier while collecting) or a lock
54    // order violation (class_linker_lock_ and heap_bitmap_lock_).
55 heap_mark_bitmap_ = heap->GetMarkBitmap();
56 }
57}
58
59ConcurrentCopying::~ConcurrentCopying() {
60}
61
62void ConcurrentCopying::RunPhases() {
63 CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier);
64 CHECK(!is_active_);
65 is_active_ = true;
66 Thread* self = Thread::Current();
67 Locks::mutator_lock_->AssertNotHeld(self);
68 {
69 ReaderMutexLock mu(self, *Locks::mutator_lock_);
70 InitializePhase();
71 }
72 FlipThreadRoots();
73 {
74 ReaderMutexLock mu(self, *Locks::mutator_lock_);
75 MarkingPhase();
76 }
77 // Verify no from space refs. This causes a pause.
78 if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) {
79 TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
80 ScopedPause pause(this);
81 CheckEmptyMarkQueue();
82 if (kVerboseMode) {
83 LOG(INFO) << "Verifying no from-space refs";
84 }
85 VerifyNoFromSpaceReferences();
86 CheckEmptyMarkQueue();
87 }
88 {
89 ReaderMutexLock mu(self, *Locks::mutator_lock_);
90 ReclaimPhase();
91 }
92 FinishPhase();
93 CHECK(is_active_);
94 is_active_ = false;
95}
96
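// Bind the bitmaps: treat the image and zygote spaces as immune and create the
// mark bitmaps this collector uses for them and for the region space.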
97void ConcurrentCopying::BindBitmaps() {
98 Thread* self = Thread::Current();
99 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
100 // Mark all of the spaces we never collect as immune.
101 for (const auto& space : heap_->GetContinuousSpaces()) {
102 if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
103 || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
104 CHECK(space->IsZygoteSpace() || space->IsImageSpace());
105 CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
106 const char* bitmap_name = space->IsImageSpace() ? "cc image space bitmap" :
107 "cc zygote space bitmap";
108 // TODO: try avoiding using bitmaps for image/zygote to save space.
109 accounting::ContinuousSpaceBitmap* bitmap =
110 accounting::ContinuousSpaceBitmap::Create(bitmap_name, space->Begin(), space->Capacity());
111 cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
112 cc_bitmaps_.push_back(bitmap);
113 } else if (space == region_space_) {
114 accounting::ContinuousSpaceBitmap* bitmap =
115 accounting::ContinuousSpaceBitmap::Create("cc region space bitmap",
116 space->Begin(), space->Capacity());
117 cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
118 cc_bitmaps_.push_back(bitmap);
119 region_space_bitmap_ = bitmap;
120 }
121 }
122}
123
124void ConcurrentCopying::InitializePhase() {
125 TimingLogger::ScopedTiming split("InitializePhase", GetTimings());
126 if (kVerboseMode) {
127 LOG(INFO) << "GC InitializePhase";
128 LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-"
129 << reinterpret_cast<void*>(region_space_->Limit());
130 }
131 CHECK(mark_queue_.IsEmpty());
132 immune_region_.Reset();
133 bytes_moved_.StoreRelaxed(0);
134 objects_moved_.StoreRelaxed(0);
135 if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
136 GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
137 GetCurrentIteration()->GetClearSoftReferences()) {
138 force_evacuate_all_ = true;
139 } else {
140 force_evacuate_all_ = false;
141 }
142 BindBitmaps();
143 if (kVerboseMode) {
144 LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_;
145 LOG(INFO) << "Immune region: " << immune_region_.Begin() << "-" << immune_region_.End();
146 LOG(INFO) << "GC end of InitializePhase";
147 }
148}
149
150// Used to switch the thread roots of a thread from from-space refs to to-space refs.
151class ThreadFlipVisitor : public Closure {
152 public:
153 explicit ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
154 : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
155 }
156
157 virtual void Run(Thread* thread) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
158 // Note: self is not necessarily equal to thread since thread may be suspended.
159 Thread* self = Thread::Current();
160 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
161 << thread->GetState() << " thread " << thread << " self " << self;
162 if (use_tlab_ && thread->HasTlab()) {
163 if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
164 // This must come before the revoke.
165 size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
166 concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
167 reinterpret_cast<Atomic<size_t>*>(&concurrent_copying_->from_space_num_objects_at_first_pause_)->
168 FetchAndAddSequentiallyConsistent(thread_local_objects);
169 } else {
170 concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
171 }
172 }
173 if (kUseThreadLocalAllocationStack) {
174 thread->RevokeThreadLocalAllocationStack();
175 }
176 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
177    thread->VisitRoots(concurrent_copying_);
178    concurrent_copying_->GetBarrier().Pass(self);
179 }
180
181 private:
182 ConcurrentCopying* const concurrent_copying_;
183 const bool use_tlab_;
184};
185
186// Called back from Runtime::FlipThreadRoots() during a pause.
187class FlipCallback : public Closure {
188 public:
189 explicit FlipCallback(ConcurrentCopying* concurrent_copying)
190 : concurrent_copying_(concurrent_copying) {
191 }
192
193 virtual void Run(Thread* thread) OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
194 ConcurrentCopying* cc = concurrent_copying_;
195 TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
196 // Note: self is not necessarily equal to thread since thread may be suspended.
197 Thread* self = Thread::Current();
198 CHECK(thread == self);
199 Locks::mutator_lock_->AssertExclusiveHeld(self);
200 cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
201 cc->SwapStacks(self);
202 if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
203 cc->RecordLiveStackFreezeSize(self);
204 cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated();
205 cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
206 }
207 cc->is_marking_ = true;
208 if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
209      CHECK(Runtime::Current()->IsAotCompiler());
210      TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
211      Runtime::Current()->VisitTransactionRoots(cc);
212    }
213 }
214
215 private:
216 ConcurrentCopying* const concurrent_copying_;
217};
218
219// Switch thread roots from from-space refs to to-space refs. Forward/mark the thread roots.
220void ConcurrentCopying::FlipThreadRoots() {
221 TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
222 if (kVerboseMode) {
223 LOG(INFO) << "time=" << region_space_->Time();
224 region_space_->DumpNonFreeRegions(LOG(INFO));
225 }
226 Thread* self = Thread::Current();
227 Locks::mutator_lock_->AssertNotHeld(self);
228 gc_barrier_->Init(self, 0);
229 ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
230 FlipCallback flip_callback(this);
231 size_t barrier_count = Runtime::Current()->FlipThreadRoots(
232 &thread_flip_visitor, &flip_callback, this);
233 {
234 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
235 gc_barrier_->Increment(self, barrier_count);
236 }
237 is_asserting_to_space_invariant_ = true;
238 QuasiAtomic::ThreadFenceForConstructor();
239 if (kVerboseMode) {
240 LOG(INFO) << "time=" << region_space_->Time();
241 region_space_->DumpNonFreeRegions(LOG(INFO));
242 LOG(INFO) << "GC end of FlipThreadRoots";
243 }
244}
245
246void ConcurrentCopying::SwapStacks(Thread* self) {
247 heap_->SwapStacks(self);
248}
249
250void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
251 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
252 live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
253}
254
255// Used to visit objects in the immune spaces.
256class ConcurrentCopyingImmuneSpaceObjVisitor {
257 public:
258 explicit ConcurrentCopyingImmuneSpaceObjVisitor(ConcurrentCopying* cc)
259 : collector_(cc) {}
260
261 void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
262 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
263 DCHECK(obj != nullptr);
264 DCHECK(collector_->immune_region_.ContainsObject(obj));
265 accounting::ContinuousSpaceBitmap* cc_bitmap =
266 collector_->cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
267 DCHECK(cc_bitmap != nullptr)
268 << "An immune space object must have a bitmap";
269 if (kIsDebugBuild) {
270 DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj))
271 << "Immune space object must be already marked";
272 }
273 // This may or may not succeed, which is ok.
274 if (kUseBakerReadBarrier) {
275 obj->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
276 }
277 if (cc_bitmap->AtomicTestAndSet(obj)) {
278 // Already marked. Do nothing.
279 } else {
280 // Newly marked. Set the gray bit and push it onto the mark stack.
281 CHECK(!kUseBakerReadBarrier || obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
282 collector_->PushOntoMarkStack<true>(obj);
283 }
284 }
285
286 private:
287 ConcurrentCopying* collector_;
288};
289
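// A checkpoint closure that does no work; it exists only so the collector can wait
// until every mutator thread has run a checkpoint. See IssueEmptyCheckpoint().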
290class EmptyCheckpoint : public Closure {
291 public:
292 explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying)
293 : concurrent_copying_(concurrent_copying) {
294 }
295
296 virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
297 // Note: self is not necessarily equal to thread since thread may be suspended.
298 Thread* self = Thread::Current();
299 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
300 << thread->GetState() << " thread " << thread << " self " << self;
301    // If thread is a running mutator, then act on behalf of the garbage collector.
302 // See the code in ThreadList::RunCheckpoint.
303 if (thread->GetState() == kRunnable) {
304 concurrent_copying_->GetBarrier().Pass(self);
305 }
306  }
307
308 private:
309 ConcurrentCopying* const concurrent_copying_;
310};
311
312// Concurrently mark roots that are guarded by read barriers and process the mark stack.
313void ConcurrentCopying::MarkingPhase() {
314 TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
315 if (kVerboseMode) {
316 LOG(INFO) << "GC MarkingPhase";
317 }
318 {
319 // Mark the image root. The WB-based collectors do not need to
320 // scan the image objects from roots by relying on the card table,
321 // but it's necessary for the RB to-space invariant to hold.
322 TimingLogger::ScopedTiming split1("VisitImageRoots", GetTimings());
323 gc::space::ImageSpace* image = heap_->GetImageSpace();
324 if (image != nullptr) {
325 mirror::ObjectArray<mirror::Object>* image_root = image->GetImageHeader().GetImageRoots();
326 mirror::Object* marked_image_root = Mark(image_root);
327 CHECK_EQ(image_root, marked_image_root) << "An image object does not move";
328 if (ReadBarrier::kEnableToSpaceInvariantChecks) {
329 AssertToSpaceInvariant(nullptr, MemberOffset(0), marked_image_root);
330 }
331 }
332 }
333 {
334 TimingLogger::ScopedTiming split2("VisitConstantRoots", GetTimings());
335    Runtime::Current()->VisitConstantRoots(this);
336  }
337 {
338 TimingLogger::ScopedTiming split3("VisitInternTableRoots", GetTimings());
339    Runtime::Current()->GetInternTable()->VisitRoots(this, kVisitRootFlagAllRoots);
340  }
341 {
342 TimingLogger::ScopedTiming split4("VisitClassLinkerRoots", GetTimings());
343    Runtime::Current()->GetClassLinker()->VisitRoots(this, kVisitRootFlagAllRoots);
344  }
345 {
346 // TODO: don't visit the transaction roots if it's not active.
347 TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
348    Runtime::Current()->VisitNonThreadRoots(this);
349  }
350
351 // Immune spaces.
352 for (auto& space : heap_->GetContinuousSpaces()) {
353 if (immune_region_.ContainsSpace(space)) {
354 DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
355 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
356 ConcurrentCopyingImmuneSpaceObjVisitor visitor(this);
357 live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
358 reinterpret_cast<uintptr_t>(space->Limit()),
359 visitor);
360 }
361 }
362
363 Thread* self = Thread::Current();
364 {
365 TimingLogger::ScopedTiming split6("ProcessMarkStack", GetTimings());
366 // Process the mark stack and issue an empty check point. If the
367 // mark stack is still empty after the check point, we're
368 // done. Otherwise, repeat.
369 ProcessMarkStack();
370 size_t count = 0;
371 while (!ProcessMarkStack()) {
372 ++count;
373 if (kVerboseMode) {
374 LOG(INFO) << "Issue an empty check point. " << count;
375 }
376 IssueEmptyCheckpoint();
377 }
378 // Need to ensure the mark stack is empty before reference
379 // processing to get rid of non-reference gray objects.
380 CheckEmptyMarkQueue();
381 // Enable the GetReference slow path and disallow access to the system weaks.
382 GetHeap()->GetReferenceProcessor()->EnableSlowPath();
383 Runtime::Current()->DisallowNewSystemWeaks();
384 QuasiAtomic::ThreadFenceForConstructor();
385 // Lock-unlock the system weak locks so that there's no thread in
386 // the middle of accessing system weaks.
387 Runtime::Current()->EnsureNewSystemWeaksDisallowed();
388 // Note: Do not issue a checkpoint from here to the
389 // SweepSystemWeaks call or else a deadlock due to
390 // WaitHoldingLocks() would occur.
391 if (kVerboseMode) {
392 LOG(INFO) << "Enabled the ref proc slow path & disabled access to system weaks.";
393 LOG(INFO) << "ProcessReferences";
394 }
395 ProcessReferences(self, true);
396 CheckEmptyMarkQueue();
397 if (kVerboseMode) {
398 LOG(INFO) << "SweepSystemWeaks";
399 }
400 SweepSystemWeaks(self);
401 if (kVerboseMode) {
402 LOG(INFO) << "SweepSystemWeaks done";
403 }
404 // Because hash_set::Erase() can call the hash function for
405 // arbitrary elements in the weak intern table in
406 // InternTable::Table::SweepWeaks(), the above SweepSystemWeaks()
407 // call may have marked some objects (strings) alive. So process
408 // the mark stack here once again.
409 ProcessMarkStack();
410 CheckEmptyMarkQueue();
411 // Disable marking.
412 if (kUseTableLookupReadBarrier) {
413 heap_->rb_table_->ClearAll();
414 DCHECK(heap_->rb_table_->IsAllCleared());
415 }
416 is_mark_queue_push_disallowed_.StoreSequentiallyConsistent(1);
417 is_marking_ = false;
418 if (kVerboseMode) {
419 LOG(INFO) << "AllowNewSystemWeaks";
420 }
421 Runtime::Current()->AllowNewSystemWeaks();
422 CheckEmptyMarkQueue();
423 }
424
425 if (kVerboseMode) {
426 LOG(INFO) << "GC end of MarkingPhase";
427 }
428}
429
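// Run an empty checkpoint through all threads and wait until every mutator has passed it.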
430void ConcurrentCopying::IssueEmptyCheckpoint() {
431 Thread* self = Thread::Current();
432 EmptyCheckpoint check_point(this);
433 ThreadList* thread_list = Runtime::Current()->GetThreadList();
434 gc_barrier_->Init(self, 0);
435 size_t barrier_count = thread_list->RunCheckpoint(&check_point);
436  // If there are no threads to wait for, all the checkpoint functions have already
437  // finished, so there is no need to release the mutator lock.
438 if (barrier_count == 0) {
439 return;
440 }
441  // Release locks then wait for all mutator threads to pass the barrier.
442 Locks::mutator_lock_->SharedUnlock(self);
443 {
444 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
445 gc_barrier_->Increment(self, barrier_count);
446 }
447 Locks::mutator_lock_->SharedLock(self);
448}
449
450mirror::Object* ConcurrentCopying::PopOffMarkStack() {
451 return mark_queue_.Dequeue();
452}
453
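// Push a to-space reference onto the mark stack. The template parameter selects the
// thread-safe or thread-unsafe enqueue; pushing is not allowed once it has been disallowed
// at the end of marking.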
454template<bool kThreadSafe>
455void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
456 CHECK_EQ(is_mark_queue_push_disallowed_.LoadRelaxed(), 0)
457 << " " << to_ref << " " << PrettyTypeOf(to_ref);
458 if (kThreadSafe) {
459 CHECK(mark_queue_.Enqueue(to_ref)) << "Mark queue overflow";
460 } else {
461 CHECK(mark_queue_.EnqueueThreadUnsafe(to_ref)) << "Mark queue overflow";
462 }
463}
464
465accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() {
466 return heap_->allocation_stack_.get();
467}
468
469accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
470 return heap_->live_stack_.get();
471}
472
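// Return the forwarding address stored in the lock word of a from-space object, or
// null if the object has not been forwarded yet.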
473inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
474 DCHECK(region_space_->IsInFromSpace(from_ref));
475 LockWord lw = from_ref->GetLockWord(false);
476 if (lw.GetState() == LockWord::kForwardingAddress) {
477 mirror::Object* fwd_ptr = reinterpret_cast<mirror::Object*>(lw.ForwardingAddress());
478 CHECK(fwd_ptr != nullptr);
479 return fwd_ptr;
480 } else {
481 return nullptr;
482 }
483}
484
485// The following visitors are used to verify that there are no
486// references to the from-space left after marking.
487class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
488 public:
489 explicit ConcurrentCopyingVerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
490 : collector_(collector) {}
491
492 void operator()(mirror::Object* ref) const
493 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
494 if (ref == nullptr) {
495 // OK.
496 return;
497 }
498 collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
499 if (kUseBakerReadBarrier) {
500 if (collector_->RegionSpace()->IsInToSpace(ref)) {
501 CHECK(ref->GetReadBarrierPointer() == nullptr)
502 << "To-space ref " << ref << " " << PrettyTypeOf(ref)
503 << " has non-white rb_ptr " << ref->GetReadBarrierPointer();
504 } else {
505 CHECK(ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
506 (ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
507 collector_->IsOnAllocStack(ref)))
508 << "Non-moving/unevac from space ref " << ref << " " << PrettyTypeOf(ref)
509 << " has non-black rb_ptr " << ref->GetReadBarrierPointer()
510 << " but isn't on the alloc stack (and has white rb_ptr)."
511 << " Is it in the non-moving space="
512 << (collector_->GetHeap()->GetNonMovingSpace()->HasAddress(ref));
513 }
514 }
515 }
516
517  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
518 OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
519    DCHECK(root != nullptr);
520    operator()(root);
521  }
522
523 private:
524  ConcurrentCopying* const collector_;
525};
526
527class ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor {
528 public:
529 explicit ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
530 : collector_(collector) {}
531
532 void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
533 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
534 mirror::Object* ref =
535 obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
536 ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
537 visitor(ref);
538 }
539 void operator()(mirror::Class* klass, mirror::Reference* ref) const
540 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
541 CHECK(klass->IsTypeOfReferenceClass());
542 this->operator()(ref, mirror::Reference::ReferentOffset(), false);
543 }
544
545 private:
546 ConcurrentCopying* collector_;
547};
548
549class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor {
550 public:
551 explicit ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
552 : collector_(collector) {}
553 void operator()(mirror::Object* obj) const
554 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
555 ObjectCallback(obj, collector_);
556 }
557 static void ObjectCallback(mirror::Object* obj, void *arg)
558 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
559 CHECK(obj != nullptr);
560 ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
561 space::RegionSpace* region_space = collector->RegionSpace();
562 CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
563 ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor visitor(collector);
564 obj->VisitReferences<true>(visitor, visitor);
565 if (kUseBakerReadBarrier) {
566 if (collector->RegionSpace()->IsInToSpace(obj)) {
567 CHECK(obj->GetReadBarrierPointer() == nullptr)
568 << "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer();
569 } else {
570 CHECK(obj->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
571 (obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
572 collector->IsOnAllocStack(obj)))
573 << "Non-moving space/unevac from space ref " << obj << " " << PrettyTypeOf(obj)
574 << " has non-black rb_ptr " << obj->GetReadBarrierPointer()
575 << " but isn't on the alloc stack (and has white rb_ptr). Is it in the non-moving space="
576 << (collector->GetHeap()->GetNonMovingSpace()->HasAddress(obj));
577 }
578 }
579 }
580
581 private:
582 ConcurrentCopying* const collector_;
583};
584
585// Verify there's no from-space references left after the marking phase.
586void ConcurrentCopying::VerifyNoFromSpaceReferences() {
587 Thread* self = Thread::Current();
588 DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
589 ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor visitor(this);
590 // Roots.
591 {
592 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
593    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
594 Runtime::Current()->VisitRoots(&ref_visitor);
595  }
596 // The to-space.
597 region_space_->WalkToSpace(ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor::ObjectCallback,
598 this);
599 // Non-moving spaces.
600 {
601 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
602 heap_->GetMarkBitmap()->Visit(visitor);
603 }
604 // The alloc stack.
605 {
606 ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
607    for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
608 it < end; ++it) {
609 mirror::Object* const obj = it->AsMirrorPtr();
610      if (obj != nullptr && obj->GetClass() != nullptr) {
611 // TODO: need to call this only if obj is alive?
612 ref_visitor(obj);
613 visitor(obj);
614 }
615 }
616 }
617 // TODO: LOS. But only refs in LOS are classes.
618}
619
620// The following visitors are used to assert the to-space invariant.
621class ConcurrentCopyingAssertToSpaceInvariantRefsVisitor {
622 public:
623 explicit ConcurrentCopyingAssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
624 : collector_(collector) {}
625
626 void operator()(mirror::Object* ref) const
627 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
628 if (ref == nullptr) {
629 // OK.
630 return;
631 }
632 collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
633 }
634 static void RootCallback(mirror::Object** root, void *arg, const RootInfo& /*root_info*/)
635 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
636 ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
637 ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector);
638 DCHECK(root != nullptr);
639 visitor(*root);
640 }
641
642 private:
643 ConcurrentCopying* collector_;
644};
645
646class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor {
647 public:
648 explicit ConcurrentCopyingAssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
649 : collector_(collector) {}
650
651 void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
652 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
653 mirror::Object* ref =
654 obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
655 ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
656 visitor(ref);
657 }
658 void operator()(mirror::Class* klass, mirror::Reference* /* ref */) const
659 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
660 CHECK(klass->IsTypeOfReferenceClass());
661 }
662
663 private:
664 ConcurrentCopying* collector_;
665};
666
667class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor {
668 public:
669 explicit ConcurrentCopyingAssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
670 : collector_(collector) {}
671 void operator()(mirror::Object* obj) const
672 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
673 ObjectCallback(obj, collector_);
674 }
675 static void ObjectCallback(mirror::Object* obj, void *arg)
676 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
677 CHECK(obj != nullptr);
678 ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
679 space::RegionSpace* region_space = collector->RegionSpace();
680 CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
681 collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
682 ConcurrentCopyingAssertToSpaceInvariantFieldVisitor visitor(collector);
683 obj->VisitReferences<true>(visitor, visitor);
684 }
685
686 private:
687 ConcurrentCopying* collector_;
688};
689
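// Pop and scan objects off the mark stack, transitioning their read barrier state from
// gray to white/black under the Baker read barrier. Returns true if the stack was already empty.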
690bool ConcurrentCopying::ProcessMarkStack() {
691 if (kVerboseMode) {
692 LOG(INFO) << "ProcessMarkStack. ";
693 }
694 size_t count = 0;
695 mirror::Object* to_ref;
696 while ((to_ref = PopOffMarkStack()) != nullptr) {
697 ++count;
698 DCHECK(!region_space_->IsInFromSpace(to_ref));
699 if (kUseBakerReadBarrier) {
700 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
701 << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
702 << " is_marked=" << IsMarked(to_ref);
703 }
704 // Scan ref fields.
705 Scan(to_ref);
706 // Mark the gray ref as white or black.
707 if (kUseBakerReadBarrier) {
708 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
709 << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
710 << " is_marked=" << IsMarked(to_ref);
711 }
712 if (to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
713 to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr &&
714 !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())) {
715 // Leave References gray so that GetReferent() will trigger RB.
716 CHECK(to_ref->AsReference()->IsEnqueued()) << "Left unenqueued ref gray " << to_ref;
717 } else {
718 if (kUseBakerReadBarrier) {
719 if (region_space_->IsInToSpace(to_ref)) {
720 // If to-space, change from gray to white.
721 bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
722 ReadBarrier::WhitePtr());
723 CHECK(success) << "Must succeed as we won the race.";
724 CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
725 } else {
726 // If non-moving space/unevac from space, change from gray
727 // to black. We can't change gray to white because it's not
728 // safe to use CAS if two threads change values in opposite
729 // directions (A->B and B->A). So, we change it to black to
730 // indicate non-moving objects that have been marked
731 // through. Note we'd need to change from black to white
732 // later (concurrently).
733 bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
734 ReadBarrier::BlackPtr());
735 CHECK(success) << "Must succeed as we won the race.";
736 CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
737 }
738 }
739 }
740 if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
741 ConcurrentCopyingAssertToSpaceInvariantObjectVisitor visitor(this);
742 visitor(to_ref);
743 }
744 }
745 // Return true if the stack was empty.
746 return count == 0;
747}
748
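// Verify that the mark queue is empty; if it is not, dump the remaining entries and abort.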
749void ConcurrentCopying::CheckEmptyMarkQueue() {
750 if (!mark_queue_.IsEmpty()) {
751 while (!mark_queue_.IsEmpty()) {
752 mirror::Object* obj = mark_queue_.Dequeue();
753 if (kUseBakerReadBarrier) {
754 mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
755 LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj) << " rb_ptr=" << rb_ptr
756 << " is_marked=" << IsMarked(obj);
757 } else {
758 LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj)
759 << " is_marked=" << IsMarked(obj);
760 }
761 }
762 LOG(FATAL) << "mark queue is not empty";
763 }
764}
765
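// Sweep system weak references (e.g. the intern table) whose referents are no longer marked.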
766void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
767 TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
768 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
769 Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
770}
771
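// Sweep the unmarked objects in the non-moving alloc spaces and then the large object space.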
772void ConcurrentCopying::Sweep(bool swap_bitmaps) {
773 {
774 TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
775 accounting::ObjectStack* live_stack = heap_->GetLiveStack();
776 if (kEnableFromSpaceAccountingCheck) {
777 CHECK_GE(live_stack_freeze_size_, live_stack->Size());
778 }
779 heap_->MarkAllocStackAsLive(live_stack);
780 live_stack->Reset();
781 }
782 CHECK(mark_queue_.IsEmpty());
783 TimingLogger::ScopedTiming split("Sweep", GetTimings());
784 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
785 if (space->IsContinuousMemMapAllocSpace()) {
786 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
787 if (space == region_space_ || immune_region_.ContainsSpace(space)) {
788 continue;
789 }
790 TimingLogger::ScopedTiming split2(
791 alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
792 RecordFree(alloc_space->Sweep(swap_bitmaps));
793 }
794 }
795 SweepLargeObjects(swap_bitmaps);
796}
797
798void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) {
799 TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
800 RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
801}
802
803class ConcurrentCopyingClearBlackPtrsVisitor {
804 public:
805 explicit ConcurrentCopyingClearBlackPtrsVisitor(ConcurrentCopying* cc)
806 : collector_(cc) {}
807 void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
808 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
809 DCHECK(obj != nullptr);
810    DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj)) << obj;
811 DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << obj;
812    obj->SetReadBarrierPointer(ReadBarrier::WhitePtr());
813    DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
814  }
815
816 private:
817 ConcurrentCopying* const collector_;
818};
819
820// Clear the black ptrs in non-moving objects back to white.
821void ConcurrentCopying::ClearBlackPtrs() {
822 CHECK(kUseBakerReadBarrier);
823 TimingLogger::ScopedTiming split("ClearBlackPtrs", GetTimings());
824 ConcurrentCopyingClearBlackPtrsVisitor visitor(this);
825 for (auto& space : heap_->GetContinuousSpaces()) {
826 if (space == region_space_) {
827 continue;
828 }
829 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
830 if (kVerboseMode) {
831 LOG(INFO) << "ClearBlackPtrs: " << *space << " bitmap: " << *mark_bitmap;
832 }
833 mark_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
834 reinterpret_cast<uintptr_t>(space->Limit()),
835 visitor);
836 }
837 space::LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace();
838 large_object_space->GetMarkBitmap()->VisitMarkedRange(
839 reinterpret_cast<uintptr_t>(large_object_space->Begin()),
840 reinterpret_cast<uintptr_t>(large_object_space->End()),
841 visitor);
842 // Objects on the allocation stack?
843 if (ReadBarrier::kEnableReadBarrierInvariantChecks || kIsDebugBuild) {
844 size_t count = GetAllocationStack()->Size();
845    auto* it = GetAllocationStack()->Begin();
846    auto* end = GetAllocationStack()->End();
847    for (size_t i = 0; i < count; ++i, ++it) {
848      CHECK_LT(it, end);
849      mirror::Object* obj = it->AsMirrorPtr();
850      if (obj != nullptr) {
851        // Must have been cleared above.
852        CHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
853      }
854 }
855 }
856}
857
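// Reclaim the space freed by evacuation: record the freed bytes/objects, clear the
// from-space regions, and sweep the non-moving spaces.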
858void ConcurrentCopying::ReclaimPhase() {
859 TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings());
860 if (kVerboseMode) {
861 LOG(INFO) << "GC ReclaimPhase";
862 }
863 Thread* self = Thread::Current();
864
865 {
866 // Double-check that the mark stack is empty.
867 // Note: need to set this after VerifyNoFromSpaceRef().
868 is_asserting_to_space_invariant_ = false;
869 QuasiAtomic::ThreadFenceForConstructor();
870 if (kVerboseMode) {
871 LOG(INFO) << "Issue an empty check point. ";
872 }
873 IssueEmptyCheckpoint();
874 // Disable the check.
875 is_mark_queue_push_disallowed_.StoreSequentiallyConsistent(0);
876 CheckEmptyMarkQueue();
877 }
878
879 {
880 // Record freed objects.
881 TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
882 // Don't include thread-locals that are in the to-space.
883 uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
884 uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
885 uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
886 uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
887 uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
888 uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
889 if (kEnableFromSpaceAccountingCheck) {
890 CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
891 CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
892 }
893 CHECK_LE(to_objects, from_objects);
894 CHECK_LE(to_bytes, from_bytes);
895 int64_t freed_bytes = from_bytes - to_bytes;
896 int64_t freed_objects = from_objects - to_objects;
897 if (kVerboseMode) {
898 LOG(INFO) << "RecordFree:"
899 << " from_bytes=" << from_bytes << " from_objects=" << from_objects
900 << " unevac_from_bytes=" << unevac_from_bytes << " unevac_from_objects=" << unevac_from_objects
901 << " to_bytes=" << to_bytes << " to_objects=" << to_objects
902 << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects
903 << " from_space size=" << region_space_->FromSpaceSize()
904 << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
905 << " to_space size=" << region_space_->ToSpaceSize();
906 LOG(INFO) << "(before) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
907 }
908 RecordFree(ObjectBytePair(freed_objects, freed_bytes));
909 if (kVerboseMode) {
910 LOG(INFO) << "(after) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
911 }
912 }
913
914 {
915 TimingLogger::ScopedTiming split3("ComputeUnevacFromSpaceLiveRatio", GetTimings());
916 ComputeUnevacFromSpaceLiveRatio();
917 }
918
919 {
920 TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
921 region_space_->ClearFromSpace();
922 }
923
924 {
925 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
926 if (kUseBakerReadBarrier) {
927 ClearBlackPtrs();
928 }
929 Sweep(false);
930 SwapBitmaps();
931 heap_->UnBindBitmaps();
932
933 // Remove bitmaps for the immune spaces.
934 while (!cc_bitmaps_.empty()) {
935 accounting::ContinuousSpaceBitmap* cc_bitmap = cc_bitmaps_.back();
936 cc_heap_bitmap_->RemoveContinuousSpaceBitmap(cc_bitmap);
937 delete cc_bitmap;
938 cc_bitmaps_.pop_back();
939 }
940 region_space_bitmap_ = nullptr;
941 }
942
943 if (kVerboseMode) {
944 LOG(INFO) << "GC end of ReclaimPhase";
945 }
946}
947
948class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor {
949 public:
950 explicit ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor(ConcurrentCopying* cc)
951 : collector_(cc) {}
952 void operator()(mirror::Object* ref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
953 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
954 DCHECK(ref != nullptr);
955    DCHECK(collector_->region_space_bitmap_->Test(ref)) << ref;
956    DCHECK(collector_->region_space_->IsInUnevacFromSpace(ref)) << ref;
957    if (kUseBakerReadBarrier) {
958      DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << ref;
959      // Clear the black ptr.
960 ref->SetReadBarrierPointer(ReadBarrier::WhitePtr());
961 }
962 size_t obj_size = ref->SizeOf();
963 size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
964 collector_->region_space_->AddLiveBytes(ref, alloc_size);
965 }
966
967 private:
968 ConcurrentCopying* collector_;
969};
970
971// Compute how much live objects are left in regions.
972void ConcurrentCopying::ComputeUnevacFromSpaceLiveRatio() {
973 region_space_->AssertAllRegionLiveBytesZeroOrCleared();
974 ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor visitor(this);
975 region_space_bitmap_->VisitMarkedRange(reinterpret_cast<uintptr_t>(region_space_->Begin()),
976 reinterpret_cast<uintptr_t>(region_space_->Limit()),
977 visitor);
978}
979
980// Assert the to-space invariant.
981void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
982 mirror::Object* ref) {
983 CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
984 if (is_asserting_to_space_invariant_) {
985 if (region_space_->IsInToSpace(ref)) {
986 // OK.
987 return;
988 } else if (region_space_->IsInUnevacFromSpace(ref)) {
989 CHECK(region_space_bitmap_->Test(ref)) << ref;
990 } else if (region_space_->IsInFromSpace(ref)) {
991 // Not OK. Do extra logging.
992 if (obj != nullptr) {
993 if (kUseBakerReadBarrier) {
994 LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj)
995 << " holder rb_ptr=" << obj->GetReadBarrierPointer();
996 } else {
997 LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj);
998 }
999 if (region_space_->IsInFromSpace(obj)) {
1000 LOG(INFO) << "holder is in the from-space.";
1001 } else if (region_space_->IsInToSpace(obj)) {
1002 LOG(INFO) << "holder is in the to-space.";
1003 } else if (region_space_->IsInUnevacFromSpace(obj)) {
1004 LOG(INFO) << "holder is in the unevac from-space.";
1005 if (region_space_bitmap_->Test(obj)) {
1006 LOG(INFO) << "holder is marked in the region space bitmap.";
1007 } else {
1008 LOG(INFO) << "holder is not marked in the region space bitmap.";
1009 }
1010 } else {
1011 // In a non-moving space.
1012 if (immune_region_.ContainsObject(obj)) {
1013 LOG(INFO) << "holder is in the image or the zygote space.";
1014 accounting::ContinuousSpaceBitmap* cc_bitmap =
1015 cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
1016 CHECK(cc_bitmap != nullptr)
1017 << "An immune space object must have a bitmap.";
1018 if (cc_bitmap->Test(obj)) {
1019 LOG(INFO) << "holder is marked in the bit map.";
1020 } else {
1021 LOG(INFO) << "holder is NOT marked in the bit map.";
1022 }
1023 } else {
1024 LOG(INFO) << "holder is in a non-moving (or main) space.";
1025 accounting::ContinuousSpaceBitmap* mark_bitmap =
1026 heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
1027 accounting::LargeObjectBitmap* los_bitmap =
1028 heap_mark_bitmap_->GetLargeObjectBitmap(obj);
1029 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1030 bool is_los = mark_bitmap == nullptr;
1031 if (!is_los && mark_bitmap->Test(obj)) {
1032 LOG(INFO) << "holder is marked in the mark bit map.";
1033 } else if (is_los && los_bitmap->Test(obj)) {
1034 LOG(INFO) << "holder is marked in the los bit map.";
1035 } else {
1036 // If ref is on the allocation stack, then it is considered
1037 // mark/alive (but not necessarily on the live stack.)
1038 if (IsOnAllocStack(obj)) {
1039 LOG(INFO) << "holder is on the alloc stack.";
1040 } else {
1041 LOG(INFO) << "holder is not marked or on the alloc stack.";
1042 }
1043 }
1044 }
1045 }
1046 LOG(INFO) << "offset=" << offset.SizeValue();
1047 }
1048 CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
1049 } else {
1050 // In a non-moving spaces. Check that the ref is marked.
1051 if (immune_region_.ContainsObject(ref)) {
1052 accounting::ContinuousSpaceBitmap* cc_bitmap =
1053 cc_heap_bitmap_->GetContinuousSpaceBitmap(ref);
1054 CHECK(cc_bitmap != nullptr)
1055 << "An immune space ref must have a bitmap. " << ref;
1056 if (kUseBakerReadBarrier) {
1057 CHECK(cc_bitmap->Test(ref))
1058 << "Unmarked immune space ref. obj=" << obj << " rb_ptr="
1059 << obj->GetReadBarrierPointer() << " ref=" << ref;
1060 } else {
1061 CHECK(cc_bitmap->Test(ref))
1062 << "Unmarked immune space ref. obj=" << obj << " ref=" << ref;
1063 }
1064 } else {
1065 accounting::ContinuousSpaceBitmap* mark_bitmap =
1066 heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
1067 accounting::LargeObjectBitmap* los_bitmap =
1068 heap_mark_bitmap_->GetLargeObjectBitmap(ref);
1069 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1070 bool is_los = mark_bitmap == nullptr;
1071 if ((!is_los && mark_bitmap->Test(ref)) ||
1072 (is_los && los_bitmap->Test(ref))) {
1073 // OK.
1074 } else {
1075 // If ref is on the allocation stack, then it may not be
1076 // marked live, but considered marked/alive (but not
1077 // necessarily on the live stack).
1078 CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. "
1079 << "obj=" << obj << " ref=" << ref;
1080 }
1081 }
1082 }
1083 }
1084}
1085
1086// Used to scan ref fields of an object.
1087class ConcurrentCopyingRefFieldsVisitor {
1088 public:
1089 explicit ConcurrentCopyingRefFieldsVisitor(ConcurrentCopying* collector)
1090 : collector_(collector) {}
1091
1092 void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
1093 const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1094 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
1095 collector_->Process(obj, offset);
1096 }
1097
1098 void operator()(mirror::Class* klass, mirror::Reference* ref) const
1099 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
1100 CHECK(klass->IsTypeOfReferenceClass());
1101 collector_->DelayReferenceReferent(klass, ref);
1102 }
1103
1104 private:
1105 ConcurrentCopying* const collector_;
1106};
1107
1108// Scan ref fields of an object.
1109void ConcurrentCopying::Scan(mirror::Object* to_ref) {
1110 DCHECK(!region_space_->IsInFromSpace(to_ref));
1111 ConcurrentCopyingRefFieldsVisitor visitor(this);
1112 to_ref->VisitReferences<true>(visitor, visitor);
1113}
1114
1115// Process a field.
1116inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
1117 mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
1118 if (ref == nullptr || region_space_->IsInToSpace(ref)) {
1119 return;
1120 }
1121 mirror::Object* to_ref = Mark(ref);
1122 if (to_ref == ref) {
1123 return;
1124 }
1125 // This may fail if the mutator writes to the field at the same time. But it's ok.
1126 mirror::Object* expected_ref = ref;
1127 mirror::Object* new_ref = to_ref;
1128 do {
1129 if (expected_ref !=
1130 obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) {
1131 // It was updated by the mutator.
1132 break;
1133 }
1134 } while (!obj->CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<false, false, kVerifyNone>(
1135 offset, expected_ref, new_ref));
1136}
1137
1138// Process some roots.
1139void ConcurrentCopying::VisitRoots(
1140 mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
1141 for (size_t i = 0; i < count; ++i) {
1142 mirror::Object** root = roots[i];
1143 mirror::Object* ref = *root;
1144 if (ref == nullptr || region_space_->IsInToSpace(ref)) {
1145 return;
1146    }
1147    mirror::Object* to_ref = Mark(ref);
1148 if (to_ref == ref) {
1149 return;
1150 }
1151 Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
1152 mirror::Object* expected_ref = ref;
1153 mirror::Object* new_ref = to_ref;
1154 do {
1155 if (expected_ref != addr->LoadRelaxed()) {
1156 // It was updated by the mutator.
1157 break;
1158 }
1159 } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
1160 }
1161}
1162
1163void ConcurrentCopying::VisitRoots(
1164 mirror::CompressedReference<mirror::Object>** roots, size_t count,
1165 const RootInfo& info ATTRIBUTE_UNUSED) {
1166 for (size_t i = 0; i < count; ++i) {
1167 mirror::CompressedReference<mirror::Object>* root = roots[i];
1168 mirror::Object* ref = root->AsMirrorPtr();
1169 if (ref == nullptr || region_space_->IsInToSpace(ref)) {
1170 return;
1171 }
1172 mirror::Object* to_ref = Mark(ref);
1173 if (to_ref == ref) {
1174 return;
1175 }
1176 auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
1177 auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
1178 auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
1179 do {
1180 if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
1181 // It was updated by the mutator.
1182 break;
1183 }
1184 } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
1185 }
1186}
1187
1188// Fill the given memory block with a dummy object. Used to fill in a
1189// copy of an object that was lost in a race.
1190void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
1191 CHECK(IsAligned<kObjectAlignment>(byte_size));
1192 memset(dummy_obj, 0, byte_size);
1193 mirror::Class* int_array_class = mirror::IntArray::GetArrayClass();
1194 CHECK(int_array_class != nullptr);
1195 AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
1196 size_t component_size = int_array_class->GetComponentSize();
1197 CHECK_EQ(component_size, sizeof(int32_t));
1198 size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
1199 if (data_offset > byte_size) {
1200 // An int array is too big. Use java.lang.Object.
1201 mirror::Class* java_lang_Object = WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object);
1202 AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object);
1203 CHECK_EQ(byte_size, java_lang_Object->GetObjectSize());
1204 dummy_obj->SetClass(java_lang_Object);
1205 CHECK_EQ(byte_size, dummy_obj->SizeOf());
1206 } else {
1207 // Use an int array.
1208 dummy_obj->SetClass(int_array_class);
1209 CHECK(dummy_obj->IsArrayInstance());
1210 int32_t length = (byte_size - data_offset) / component_size;
1211 dummy_obj->AsArray()->SetLength(length);
1212 CHECK_EQ(dummy_obj->AsArray()->GetLength(), length)
1213 << "byte_size=" << byte_size << " length=" << length
1214 << " component_size=" << component_size << " data_offset=" << data_offset;
1215 CHECK_EQ(byte_size, dummy_obj->SizeOf())
1216 << "byte_size=" << byte_size << " length=" << length
1217 << " component_size=" << component_size << " data_offset=" << data_offset;
1218 }
1219}
1220
1221// Reuse memory blocks that were copies of objects lost in races.
1222mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
1223 // Try to reuse the blocks that were unused due to CAS failures.
1224 CHECK(IsAligned<space::RegionSpace::kAlignment>(alloc_size));
1225 Thread* self = Thread::Current();
1226 size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
1227 MutexLock mu(self, skipped_blocks_lock_);
1228 auto it = skipped_blocks_map_.lower_bound(alloc_size);
1229 if (it == skipped_blocks_map_.end()) {
1230 // Not found.
1231 return nullptr;
1232 }
1233 {
1234 size_t byte_size = it->first;
1235 CHECK_GE(byte_size, alloc_size);
1236 if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
1237 // If remainder would be too small for a dummy object, retry with a larger request size.
1238 it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
1239 if (it == skipped_blocks_map_.end()) {
1240 // Not found.
1241 return nullptr;
1242 }
1243 CHECK(IsAligned<space::RegionSpace::kAlignment>(it->first - alloc_size));
1244 CHECK_GE(it->first - alloc_size, min_object_size)
1245 << "byte_size=" << byte_size << " it->first=" << it->first << " alloc_size=" << alloc_size;
1246 }
1247 }
1248 // Found a block.
1249 CHECK(it != skipped_blocks_map_.end());
1250 size_t byte_size = it->first;
1251 uint8_t* addr = it->second;
1252 CHECK_GE(byte_size, alloc_size);
1253 CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
1254 CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size));
1255 if (kVerboseMode) {
1256 LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
1257 }
1258 skipped_blocks_map_.erase(it);
1259 memset(addr, 0, byte_size);
1260 if (byte_size > alloc_size) {
1261 // Return the remainder to the map.
1262 CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size - alloc_size));
1263 CHECK_GE(byte_size - alloc_size, min_object_size);
1264 FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
1265 byte_size - alloc_size);
1266 CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
1267 skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
1268 }
1269 return reinterpret_cast<mirror::Object*>(addr);
1270}
1271
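// Evacuate a from-space object: allocate a to-space copy (falling back to the skipped
// blocks and then the non-moving space), copy the contents, and install a forwarding
// pointer in the from-space object's lock word via a CAS.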
1272mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) {
1273 DCHECK(region_space_->IsInFromSpace(from_ref));
1274 // No read barrier to avoid nested RB that might violate the to-space
1275 // invariant. Note that from_ref is a from space ref so the SizeOf()
1276 // call will access the from-space meta objects, but it's ok and necessary.
1277 size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
1278 size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
1279 size_t region_space_bytes_allocated = 0U;
1280 size_t non_moving_space_bytes_allocated = 0U;
1281 size_t bytes_allocated = 0U;
1282  size_t dummy;
1283  mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
1284      region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
1285  bytes_allocated = region_space_bytes_allocated;
1286 if (to_ref != nullptr) {
1287 DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
1288 }
1289 bool fall_back_to_non_moving = false;
1290 if (UNLIKELY(to_ref == nullptr)) {
1291 // Failed to allocate in the region space. Try the skipped blocks.
1292 to_ref = AllocateInSkippedBlock(region_space_alloc_size);
1293 if (to_ref != nullptr) {
1294 // Succeeded to allocate in a skipped block.
1295 if (heap_->use_tlab_) {
1296 // This is necessary for the tlab case as it's not accounted in the space.
1297 region_space_->RecordAlloc(to_ref);
1298 }
1299 bytes_allocated = region_space_alloc_size;
1300 } else {
1301 // Fall back to the non-moving space.
1302 fall_back_to_non_moving = true;
1303 if (kVerboseMode) {
1304 LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
1305 << to_space_bytes_skipped_.LoadSequentiallyConsistent()
1306 << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent();
1307 }
1308 fall_back_to_non_moving = true;
1309 to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
1310                                               &non_moving_space_bytes_allocated, nullptr, &dummy);
1311      CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed";
1312 bytes_allocated = non_moving_space_bytes_allocated;
1313 // Mark it in the mark bitmap.
1314 accounting::ContinuousSpaceBitmap* mark_bitmap =
1315 heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
1316 CHECK(mark_bitmap != nullptr);
1317 CHECK(!mark_bitmap->AtomicTestAndSet(to_ref));
1318 }
1319 }
1320 DCHECK(to_ref != nullptr);
1321
1322 // Attempt to install the forward pointer. This is in a loop as the
1323 // lock word atomic write can fail.
1324 while (true) {
1325 // Copy the object. TODO: copy only the lockword in the second iteration and on?
1326 memcpy(to_ref, from_ref, obj_size);
1327 // Set the gray ptr.
1328 if (kUseBakerReadBarrier) {
1329 to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr());
1330 }
1331
1332 LockWord old_lock_word = to_ref->GetLockWord(false);
1333
1334 if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
1335 // Lost the race. Another thread (either GC or mutator) stored
1336 // the forwarding pointer first. Make the lost copy (to_ref)
1337 // look like a valid but dead (dummy) object and keep it for
1338 // future reuse.
1339 FillWithDummyObject(to_ref, bytes_allocated);
1340 if (!fall_back_to_non_moving) {
1341 DCHECK(region_space_->IsInToSpace(to_ref));
1342 if (bytes_allocated > space::RegionSpace::kRegionSize) {
1343 // Free the large alloc.
1344 region_space_->FreeLarge(to_ref, bytes_allocated);
1345 } else {
1346 // Record the lost copy for later reuse.
1347 heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated);
1348 to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated);
1349 to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1);
1350 MutexLock mu(Thread::Current(), skipped_blocks_lock_);
1351 skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
1352 reinterpret_cast<uint8_t*>(to_ref)));
1353 }
1354 } else {
1355 DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
1356 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
1357 // Free the non-moving-space chunk.
1358 accounting::ContinuousSpaceBitmap* mark_bitmap =
1359 heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
1360 CHECK(mark_bitmap != nullptr);
1361 CHECK(mark_bitmap->Clear(to_ref));
1362 heap_->non_moving_space_->Free(Thread::Current(), to_ref);
1363 }
1364
1365 // Get the winner's forward ptr.
1366 mirror::Object* lost_fwd_ptr = to_ref;
1367 to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress());
1368 CHECK(to_ref != nullptr);
1369 CHECK_NE(to_ref, lost_fwd_ptr);
1370 CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref));
1371 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
1372 return to_ref;
1373 }
1374
1375 LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));
1376
1377 // Try to atomically write the fwd ptr.
1378 bool success = from_ref->CasLockWordWeakSequentiallyConsistent(old_lock_word, new_lock_word);
1379 if (LIKELY(success)) {
1380 // The CAS succeeded.
1381 objects_moved_.FetchAndAddSequentiallyConsistent(1);
1382 bytes_moved_.FetchAndAddSequentiallyConsistent(region_space_alloc_size);
1383 if (LIKELY(!fall_back_to_non_moving)) {
1384 DCHECK(region_space_->IsInToSpace(to_ref));
1385 } else {
1386 DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
1387 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
1388 }
1389 if (kUseBakerReadBarrier) {
1390 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
1391 }
1392 DCHECK(GetFwdPtr(from_ref) == to_ref);
1393 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
1394 PushOntoMarkStack<true>(to_ref);
1395 return to_ref;
1396 } else {
1397 // The CAS failed. It may have lost the race or may have failed
1398 // due to monitor/hashcode ops. Either way, retry.
1399 }
1400 }
1401}
1402
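// Returns the marked (possibly forwarded) object if from_ref is already marked, or
// nullptr if it is not. Unlike Mark(), this never copies or marks anything; it is the
// read-only query behind IsMarkedCallback and IsHeapReferenceMarkedCallback used
// during reference processing.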
1403mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
1404 DCHECK(from_ref != nullptr);
1405  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
1406 if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
1407    // It's already marked.
1408 return from_ref;
1409 }
1410 mirror::Object* to_ref;
1411  if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
1412    to_ref = GetFwdPtr(from_ref);
1413 DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) ||
1414 heap_->non_moving_space_->HasAddress(to_ref))
1415 << "from_ref=" << from_ref << " to_ref=" << to_ref;
1416  } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
1417    if (region_space_bitmap_->Test(from_ref)) {
1418 to_ref = from_ref;
1419 } else {
1420 to_ref = nullptr;
1421 }
1422 } else {
1423 // from_ref is in a non-moving space.
1424 if (immune_region_.ContainsObject(from_ref)) {
1425 accounting::ContinuousSpaceBitmap* cc_bitmap =
1426 cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
1427 DCHECK(cc_bitmap != nullptr)
1428 << "An immune space object must have a bitmap";
1429 if (kIsDebugBuild) {
1430 DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
1431 << "Immune space object must be already marked";
1432 }
1433 if (cc_bitmap->Test(from_ref)) {
1434 // Already marked.
1435 to_ref = from_ref;
1436 } else {
1437        // Not marked.
1438 to_ref = nullptr;
1439 }
1440 } else {
1441 // Non-immune non-moving space. Use the mark bitmap.
1442 accounting::ContinuousSpaceBitmap* mark_bitmap =
1443 heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
1444 accounting::LargeObjectBitmap* los_bitmap =
1445 heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
1446 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1447 bool is_los = mark_bitmap == nullptr;
1448 if (!is_los && mark_bitmap->Test(from_ref)) {
1449 // Already marked.
1450 to_ref = from_ref;
1451 } else if (is_los && los_bitmap->Test(from_ref)) {
1452 // Already marked in LOS.
1453 to_ref = from_ref;
1454 } else {
1455 // Not marked.
1456 if (IsOnAllocStack(from_ref)) {
1457 // If on the allocation stack, it's considered marked.
1458 to_ref = from_ref;
1459 } else {
1460 // Not marked.
1461 to_ref = nullptr;
1462 }
1463 }
1464 }
1465 }
1466 return to_ref;
1467}
1468
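// An object that is still on the allocation stack is treated as implicitly marked by
// IsMarked() and Mark(), so it never needs a bitmap bit; the acquire fence orders the
// stack read against concurrent pushes by mutators.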
1469bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
1470 QuasiAtomic::ThreadFenceAcquire();
1471 accounting::ObjectStack* alloc_stack = GetAllocationStack();
1472  return alloc_stack->Contains(ref);
1473}
1474
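// The central marking routine, reached both from the GC's mark-stack processing and
// from mutator read barriers: ensures from_ref is marked and returns its current
// address. From-space objects are copied with Copy(); unevacuated from-space, immune,
// and other non-moving objects are marked in place in the appropriate bitmap, grayed
// when Baker read barriers are in use, and, if newly marked, pushed onto the mark
// stack for later scanning.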
1475mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
1476 if (from_ref == nullptr) {
1477 return nullptr;
1478 }
1479 DCHECK(from_ref != nullptr);
1480 DCHECK(heap_->collector_type_ == kCollectorTypeCC);
1481  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
1482 if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
1483    // It's already marked.
1484 return from_ref;
1485 }
1486 mirror::Object* to_ref;
1487  if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
1488    to_ref = GetFwdPtr(from_ref);
1489 if (kUseBakerReadBarrier) {
1490 DCHECK(to_ref != ReadBarrier::GrayPtr()) << "from_ref=" << from_ref << " to_ref=" << to_ref;
1491 }
1492 if (to_ref == nullptr) {
1493 // It isn't marked yet. Mark it by copying it to the to-space.
1494 to_ref = Copy(from_ref);
1495 }
1496 DCHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
1497 << "from_ref=" << from_ref << " to_ref=" << to_ref;
1498  } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
1499    // This may or may not succeed, which is ok.
1500 if (kUseBakerReadBarrier) {
1501 from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
1502 }
1503 if (region_space_bitmap_->AtomicTestAndSet(from_ref)) {
1504 // Already marked.
1505 to_ref = from_ref;
1506 } else {
1507 // Newly marked.
1508 to_ref = from_ref;
1509 if (kUseBakerReadBarrier) {
1510 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
1511 }
1512 PushOntoMarkStack<true>(to_ref);
1513 }
1514 } else {
1515 // from_ref is in a non-moving space.
1516 DCHECK(!region_space_->HasAddress(from_ref)) << from_ref;
1517 if (immune_region_.ContainsObject(from_ref)) {
1518 accounting::ContinuousSpaceBitmap* cc_bitmap =
1519 cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
1520 DCHECK(cc_bitmap != nullptr)
1521 << "An immune space object must have a bitmap";
1522 if (kIsDebugBuild) {
1523 DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
1524 << "Immune space object must be already marked";
1525 }
1526 // This may or may not succeed, which is ok.
1527 if (kUseBakerReadBarrier) {
1528 from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
1529 }
1530 if (cc_bitmap->AtomicTestAndSet(from_ref)) {
1531 // Already marked.
1532 to_ref = from_ref;
1533 } else {
1534 // Newly marked.
1535 to_ref = from_ref;
1536 if (kUseBakerReadBarrier) {
1537 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
1538 }
1539 PushOntoMarkStack<true>(to_ref);
1540 }
1541 } else {
1542 // Use the mark bitmap.
1543 accounting::ContinuousSpaceBitmap* mark_bitmap =
1544 heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
1545 accounting::LargeObjectBitmap* los_bitmap =
1546 heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
1547 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1548 bool is_los = mark_bitmap == nullptr;
1549 if (!is_los && mark_bitmap->Test(from_ref)) {
1550 // Already marked.
1551 to_ref = from_ref;
1552 if (kUseBakerReadBarrier) {
1553 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
1554 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
1555 }
1556 } else if (is_los && los_bitmap->Test(from_ref)) {
1557 // Already marked in LOS.
1558 to_ref = from_ref;
1559 if (kUseBakerReadBarrier) {
1560 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
1561 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
1562 }
1563 } else {
1564 // Not marked.
1565 if (IsOnAllocStack(from_ref)) {
1566 // If it's on the allocation stack, it's considered marked. Keep it white.
1567 to_ref = from_ref;
1568 // Objects on the allocation stack need not be marked.
1569 if (!is_los) {
1570 DCHECK(!mark_bitmap->Test(to_ref));
1571 } else {
1572 DCHECK(!los_bitmap->Test(to_ref));
1573 }
1574 if (kUseBakerReadBarrier) {
1575 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
1576 }
1577 } else {
1578 // Not marked or on the allocation stack. Try to mark it.
1579 // This may or may not succeed, which is ok.
1580 if (kUseBakerReadBarrier) {
1581 from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
1582 }
1583 if (!is_los && mark_bitmap->AtomicTestAndSet(from_ref)) {
1584 // Already marked.
1585 to_ref = from_ref;
1586 } else if (is_los && los_bitmap->AtomicTestAndSet(from_ref)) {
1587 // Already marked in LOS.
1588 to_ref = from_ref;
1589 } else {
1590 // Newly marked.
1591 to_ref = from_ref;
1592 if (kUseBakerReadBarrier) {
1593 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
1594 }
1595 PushOntoMarkStack<true>(to_ref);
1596 }
1597 }
1598 }
1599 }
1600 }
1601 return to_ref;
1602}
1603
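// Tears down per-collection state at the end of the cycle: the mark queue must
// already be empty, the skipped-block free list is dropped, and the heap's mark
// bitmaps are cleared for the next collection.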
1604void ConcurrentCopying::FinishPhase() {
1605 region_space_ = nullptr;
1606 CHECK(mark_queue_.IsEmpty());
1607 mark_queue_.Clear();
1608 {
1609 MutexLock mu(Thread::Current(), skipped_blocks_lock_);
1610 skipped_blocks_map_.clear();
1611 }
1612 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1613 heap_->ClearMarkedObjects();
1614}
1615
1616mirror::Object* ConcurrentCopying::IsMarkedCallback(mirror::Object* from_ref, void* arg) {
1617 return reinterpret_cast<ConcurrentCopying*>(arg)->IsMarked(from_ref);
1618}
1619
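// Reference-processor callback: returns whether the referent of 'field' is marked
// and, if the referent has moved, updates the field to point at the to-space copy
// (the fences order the field update with respect to the copied object contents).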
1620bool ConcurrentCopying::IsHeapReferenceMarkedCallback(
1621 mirror::HeapReference<mirror::Object>* field, void* arg) {
1622 mirror::Object* from_ref = field->AsMirrorPtr();
1623 mirror::Object* to_ref = reinterpret_cast<ConcurrentCopying*>(arg)->IsMarked(from_ref);
1624 if (to_ref == nullptr) {
1625 return false;
1626 }
1627 if (from_ref != to_ref) {
1628 QuasiAtomic::ThreadFenceRelease();
1629 field->Assign(to_ref);
1630 QuasiAtomic::ThreadFenceSequentiallyConsistent();
1631 }
1632 return true;
1633}
1634
1635mirror::Object* ConcurrentCopying::MarkCallback(mirror::Object* from_ref, void* arg) {
1636 return reinterpret_cast<ConcurrentCopying*>(arg)->Mark(from_ref);
1637}
1638
1639void ConcurrentCopying::ProcessMarkStackCallback(void* arg) {
1640 reinterpret_cast<ConcurrentCopying*>(arg)->ProcessMarkStack();
1641}
1642
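// Delegates java.lang.ref.Reference instances with not-yet-marked referents to the
// heap's ReferenceProcessor, which revisits them once marking has finished.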
1643void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
1644 heap_->GetReferenceProcessor()->DelayReferenceReferent(
1645 klass, reference, &IsHeapReferenceMarkedCallback, this);
1646}
1647
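// Runs soft/weak/finalizer/phantom reference processing, using the callbacks above to
// test referents for reachability and to mark or update them as needed.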
1648void ConcurrentCopying::ProcessReferences(Thread* self, bool concurrent) {
1649 TimingLogger::ScopedTiming split("ProcessReferences", GetTimings());
1650 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1651 GetHeap()->GetReferenceProcessor()->ProcessReferences(
1652 concurrent, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
1653 &IsHeapReferenceMarkedCallback, &MarkCallback, &ProcessMarkStackCallback, this);
1654}
1655
1656void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
1657 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1658 region_space_->RevokeAllThreadLocalBuffers();
1659}
1660
1661}  // namespace collector
1662} // namespace gc
1663} // namespace art