/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "concurrent_copying.h"

#include "art_field-inl.h"
#include "base/enums.h"
#include "base/histogram-inl.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "debugger.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
#include "image-inl.h"
#include "intern_table.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"

namespace art {
namespace gc {
namespace collector {

static constexpr size_t kDefaultGcMarkStackSize = 2 * MB;
// If kGrayDirtyImmuneObjects is true then we gray dirty objects in the GC pause to prevent dirty
// pages.
static constexpr bool kGrayDirtyImmuneObjects = true;
// If kFilterModUnionCards then we attempt to filter cards that don't need to be dirty in the mod
// union table. Disabled since it does not seem to help the pause much.
static constexpr bool kFilterModUnionCards = kIsDebugBuild;
// If kDisallowReadBarrierDuringScan is true then the GC aborts if any read barriers occur during
// ConcurrentCopying::Scan. May be used to diagnose possibly unnecessary read barriers.
// Only enabled for kIsDebugBuild to avoid performance hit.
static constexpr bool kDisallowReadBarrierDuringScan = kIsDebugBuild;

ConcurrentCopying::ConcurrentCopying(Heap* heap,
                                     const std::string& name_prefix,
                                     bool measure_read_barrier_slow_path)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       "concurrent copying + mark sweep"),
      region_space_(nullptr), gc_barrier_(new Barrier(0)),
      gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack",
                                                     kDefaultGcMarkStackSize,
                                                     kDefaultGcMarkStackSize)),
      mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
      thread_running_gc_(nullptr),
      is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
      region_space_bitmap_(nullptr),
      heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0), mark_stack_mode_(kMarkStackModeOff),
      weak_ref_access_enabled_(true),
      skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
      measure_read_barrier_slow_path_(measure_read_barrier_slow_path),
      rb_slow_path_ns_(0),
      rb_slow_path_count_(0),
      rb_slow_path_count_gc_(0),
      rb_slow_path_histogram_lock_("Read barrier histogram lock"),
      rb_slow_path_time_histogram_("Mutator time in read barrier slow path", 500, 32),
      rb_slow_path_count_total_(0),
      rb_slow_path_count_gc_total_(0),
      rb_table_(heap_->GetReadBarrierTable()),
      force_evacuate_all_(false),
      immune_gray_stack_lock_("concurrent copying immune gray stack lock",
                              kMarkSweepMarkStackLock) {
  static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
                "The region space size and the read barrier table region size must match");
  Thread* self = Thread::Current();
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Cache this so that we won't have to lock heap_bitmap_lock_ in
    // Mark() which could cause a nested lock on heap_bitmap_lock_
    // when GC causes a RB while doing GC or a lock order violation
    // (class_linker_lock_ and heap_bitmap_lock_).
    heap_mark_bitmap_ = heap->GetMarkBitmap();
  }
  {
    MutexLock mu(self, mark_stack_lock_);
    for (size_t i = 0; i < kMarkStackPoolSize; ++i) {
      accounting::AtomicStack<mirror::Object>* mark_stack =
          accounting::AtomicStack<mirror::Object>::Create(
              "thread local mark stack", kMarkStackSize, kMarkStackSize);
      pooled_mark_stacks_.push_back(mark_stack);
    }
  }
}

void ConcurrentCopying::MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) {
  // Used for preserving soft references, should be OK to not have a CAS here since there should be
  // no other threads which can trigger read barriers on the same referent during reference
  // processing.
  from_ref->Assign(Mark(from_ref->AsMirrorPtr()));
  DCHECK(!from_ref->IsNull());
}

ConcurrentCopying::~ConcurrentCopying() {
  STLDeleteElements(&pooled_mark_stacks_);
}

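// High-level flow of one collection cycle, as driven by RunPhases() below: initialization under a
// shared mutator lock, a thread-root flip pause (FlipThreadRoots), a concurrent marking phase, an
// optional paused verification that no from-space refs remain, then the reclaim and finish phases.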
void ConcurrentCopying::RunPhases() {
  CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier);
  CHECK(!is_active_);
  is_active_ = true;
  Thread* self = Thread::Current();
  thread_running_gc_ = self;
  Locks::mutator_lock_->AssertNotHeld(self);
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    InitializePhase();
  }
  FlipThreadRoots();
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    MarkingPhase();
  }
  // Verify no from space refs. This causes a pause.
  if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) {
    TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
    ScopedPause pause(this);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "Verifying no from-space refs";
    }
    VerifyNoFromSpaceReferences();
    if (kVerboseMode) {
      LOG(INFO) << "Done verifying no from-space refs";
    }
    CheckEmptyMarkStack();
  }
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  FinishPhase();
  CHECK(is_active_);
  is_active_ = false;
  thread_running_gc_ = nullptr;
}

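// Set up the bitmaps for this cycle: spaces that are never (or only fully) collected become
// immune, while the region space gets a fresh continuous-space bitmap for marking.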
void ConcurrentCopying::BindBitmaps() {
  Thread* self = Thread::Current();
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      CHECK(space->IsZygoteSpace() || space->IsImageSpace());
      immune_spaces_.AddSpace(space);
    } else if (space == region_space_) {
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create("cc region space bitmap",
                                                    space->Begin(), space->Capacity());
      region_space_bitmap_ = bitmap;
    }
  }
}

void ConcurrentCopying::InitializePhase() {
  TimingLogger::ScopedTiming split("InitializePhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC InitializePhase";
    LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-"
              << reinterpret_cast<void*>(region_space_->Limit());
  }
  CheckEmptyMarkStack();
  if (kIsDebugBuild) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    CHECK(false_gray_stack_.empty());
  }

  mark_from_read_barrier_measurements_ = measure_read_barrier_slow_path_;
  if (measure_read_barrier_slow_path_) {
    rb_slow_path_ns_.StoreRelaxed(0);
    rb_slow_path_count_.StoreRelaxed(0);
    rb_slow_path_count_gc_.StoreRelaxed(0);
  }

  immune_spaces_.Reset();
  bytes_moved_.StoreRelaxed(0);
  objects_moved_.StoreRelaxed(0);
  if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
      GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
      GetCurrentIteration()->GetClearSoftReferences()) {
    force_evacuate_all_ = true;
  } else {
    force_evacuate_all_ = false;
  }
  if (kUseBakerReadBarrier) {
    updated_all_immune_objects_.StoreRelaxed(false);
    // GC may gray immune objects in the thread flip.
    gc_grays_immune_objects_ = true;
    if (kIsDebugBuild) {
      MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
      DCHECK(immune_gray_stack_.empty());
    }
  }
  BindBitmaps();
  if (kVerboseMode) {
    LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_;
    LOG(INFO) << "Largest immune region: " << immune_spaces_.GetLargestImmuneRegion().Begin()
              << "-" << immune_spaces_.GetLargestImmuneRegion().End();
    for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
      LOG(INFO) << "Immune space: " << *space;
    }
    LOG(INFO) << "GC end of InitializePhase";
  }
}

// Used to switch the thread roots of a thread from from-space refs to to-space refs.
class ConcurrentCopying::ThreadFlipVisitor : public Closure, public RootVisitor {
 public:
  ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
      : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
  }

  virtual void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->SetIsGcMarking(true);
    if (use_tlab_ && thread->HasTlab()) {
      if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
        // This must come before the revoke.
        size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
        reinterpret_cast<Atomic<size_t>*>(&concurrent_copying_->from_space_num_objects_at_first_pause_)->
            FetchAndAddSequentiallyConsistent(thread_local_objects);
      } else {
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
      }
    }
    if (kUseThreadLocalAllocationStack) {
      thread->RevokeThreadLocalAllocationStack();
    }
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // We can use the non-CAS VisitRoots functions below because we update thread-local GC roots
    // only.
    thread->VisitRoots(this);
    concurrent_copying_->GetBarrier().Pass(self);
  }

  void VisitRoots(mirror::Object*** roots,
                  size_t count,
                  const RootInfo& info ATTRIBUTE_UNUSED)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    for (size_t i = 0; i < count; ++i) {
      mirror::Object** root = roots[i];
      mirror::Object* ref = *root;
      if (ref != nullptr) {
        mirror::Object* to_ref = concurrent_copying_->Mark(ref);
        if (to_ref != ref) {
          *root = to_ref;
        }
      }
    }
  }

  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                  size_t count,
                  const RootInfo& info ATTRIBUTE_UNUSED)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    for (size_t i = 0; i < count; ++i) {
      mirror::CompressedReference<mirror::Object>* const root = roots[i];
      if (!root->IsNull()) {
        mirror::Object* ref = root->AsMirrorPtr();
        mirror::Object* to_ref = concurrent_copying_->Mark(ref);
        if (to_ref != ref) {
          root->Assign(to_ref);
        }
      }
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool use_tlab_;
};

// Called back from Runtime::FlipThreadRoots() during a pause.
class ConcurrentCopying::FlipCallback : public Closure {
 public:
  explicit FlipCallback(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE REQUIRES(Locks::mutator_lock_) {
    ConcurrentCopying* cc = concurrent_copying_;
    TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self);
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
    cc->SwapStacks();
    if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
      cc->RecordLiveStackFreezeSize(self);
      cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated();
      cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
    }
    cc->is_marking_ = true;
    cc->mark_stack_mode_.StoreRelaxed(ConcurrentCopying::kMarkStackModeThreadLocal);
    if (kIsDebugBuild) {
      cc->region_space_->AssertAllRegionLiveBytesZeroOrCleared();
    }
    if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
      CHECK(Runtime::Current()->IsAotCompiler());
      TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
      Runtime::Current()->VisitTransactionRoots(cc);
    }
    if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
      cc->GrayAllDirtyImmuneObjects();
      if (kIsDebugBuild) {
        // Check that all non-gray immune objects only reference immune objects.
        cc->VerifyGrayImmuneObjects();
      }
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

class ConcurrentCopying::VerifyGrayImmuneObjectsVisitor {
 public:
  explicit VerifyGrayImmuneObjectsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
      const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_)
      SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
    CheckReference(obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset),
                   obj, offset);
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    CheckReference(ref->GetReferent<kWithoutReadBarrier>(),
                   ref,
                   mirror::Reference::ReferentOffset());
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      ALWAYS_INLINE
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      ALWAYS_INLINE
      SHARED_REQUIRES(Locks::mutator_lock_) {
    CheckReference(root->AsMirrorPtr(), nullptr, MemberOffset(0));
  }

 private:
  ConcurrentCopying* const collector_;

  void CheckReference(mirror::Object* ref, mirror::Object* holder, MemberOffset offset) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (ref != nullptr) {
      CHECK(collector_->immune_spaces_.ContainsObject(ref))
          << "Non gray object references non immune object " << ref << " " << PrettyTypeOf(ref)
          << " in holder " << holder << " " << PrettyTypeOf(holder) << " offset="
          << offset.Uint32Value();
    }
  }
};

void ConcurrentCopying::VerifyGrayImmuneObjects() {
  TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
  for (auto& space : immune_spaces_.GetSpaces()) {
    DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
    accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
    VerifyGrayImmuneObjectsVisitor visitor(this);
    live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                  reinterpret_cast<uintptr_t>(space->Limit()),
                                  [&visitor](mirror::Object* obj)
        SHARED_REQUIRES(Locks::mutator_lock_) {
      // If an object is not gray, it should only have references to things in the immune spaces.
      if (obj->GetReadBarrierPointer() != ReadBarrier::GrayPtr()) {
        obj->VisitReferences</*kVisitNativeRoots*/true,
                            kDefaultVerifyFlags,
                            kWithoutReadBarrier>(visitor, visitor);
      }
    });
  }
}

// Switch thread roots from from-space refs to to-space refs. Forward/mark the thread roots.
void ConcurrentCopying::FlipThreadRoots() {
  TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
  }
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  gc_barrier_->Init(self, 0);
  ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
  FlipCallback flip_callback(this);
  heap_->ThreadFlipBegin(self);  // Sync with JNI critical calls.
  size_t barrier_count = Runtime::Current()->FlipThreadRoots(
      &thread_flip_visitor, &flip_callback, this);
  heap_->ThreadFlipEnd(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  is_asserting_to_space_invariant_ = true;
  QuasiAtomic::ThreadFenceForConstructor();
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
    LOG(INFO) << "GC end of FlipThreadRoots";
  }
}

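// Visitor that grays an immune-space object so that the read barrier will treat it as potentially
// unscanned. It is only used while the mutators are suspended, hence the exclusive-lock assertion
// in the debug build.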
class ConcurrentCopying::GrayImmuneObjectVisitor {
 public:
  explicit GrayImmuneObjectVisitor() {}

  ALWAYS_INLINE void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) {
    if (kUseBakerReadBarrier) {
      if (kIsDebugBuild) {
        Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
      }
      obj->SetReadBarrierPointer(ReadBarrier::GrayPtr());
    }
  }

  static void Callback(mirror::Object* obj, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) {
    reinterpret_cast<GrayImmuneObjectVisitor*>(arg)->operator()(obj);
  }
};

void ConcurrentCopying::GrayAllDirtyImmuneObjects() {
  TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
  gc::Heap* const heap = Runtime::Current()->GetHeap();
  accounting::CardTable* const card_table = heap->GetCardTable();
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
    DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
    GrayImmuneObjectVisitor visitor;
    accounting::ModUnionTable* table = heap->FindModUnionTableFromSpace(space);
    // Mark all the objects on dirty cards since these may point to objects in other spaces.
    // Once these are marked, the GC will eventually clear them later.
    // Table is non-null for boot image and zygote spaces. It is only null for application image
    // spaces.
    if (table != nullptr) {
      // TODO: Add preclean outside the pause.
      table->ClearCards();
      table->VisitObjects(GrayImmuneObjectVisitor::Callback, &visitor);
    } else {
      // TODO: Consider having a mark bitmap for app image spaces and avoid scanning during the
      // pause because app image spaces are all dirty pages anyways.
      card_table->Scan<false>(space->GetMarkBitmap(), space->Begin(), space->End(), visitor);
    }
  }
  // Since all of the objects that may point to other spaces are marked, we can avoid all the read
  // barriers in the immune spaces.
  updated_all_immune_objects_.StoreRelaxed(true);
}

void ConcurrentCopying::SwapStacks() {
  heap_->SwapStacks();
}

void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
}

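// A checkpoint closure that only passes the barrier. Running it on every thread guarantees that no
// mutator is still inside an operation (e.g. marking an immune object) that began before the
// checkpoint was issued.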
class EmptyCheckpoint : public Closure {
 public:
  explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Used to visit objects in the immune spaces.
inline void ConcurrentCopying::ScanImmuneObject(mirror::Object* obj) {
  DCHECK(obj != nullptr);
  DCHECK(immune_spaces_.ContainsObject(obj));
  // Update the fields without graying it or pushing it onto the mark stack.
  Scan(obj);
}

class ConcurrentCopying::ImmuneSpaceScanObjVisitor {
 public:
  explicit ImmuneSpaceScanObjVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}

  ALWAYS_INLINE void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) {
    if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
      if (obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
        collector_->ScanImmuneObject(obj);
        // Done scanning the object, go back to white.
        bool success = obj->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                        ReadBarrier::WhitePtr());
        CHECK(success);
      }
    } else {
      collector_->ScanImmuneObject(obj);
    }
  }

  static void Callback(mirror::Object* obj, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) {
    reinterpret_cast<ImmuneSpaceScanObjVisitor*>(arg)->operator()(obj);
  }

 private:
  ConcurrentCopying* const collector_;
};

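// Rough sketch of the gray/white protocol relied on above (the read barrier code is the
// authoritative version): an object is grayed while its fields may still hold from-space refs
// that a reader must not see; once the GC has updated those fields (ScanImmuneObject above), the
// object is atomically flipped back to white so that readers stop taking the read barrier slow
// path for it.
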
// Concurrently mark roots that are guarded by read barriers and process the mark stack.
void ConcurrentCopying::MarkingPhase() {
  TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC MarkingPhase";
  }
  CHECK(weak_ref_access_enabled_);

  // Scan immune spaces.
  // Update all the fields in the immune spaces first without graying the objects so that we
  // minimize dirty pages in the immune spaces. Note mutators can concurrently access and gray some
  // of the objects.
  if (kUseBakerReadBarrier) {
    gc_grays_immune_objects_ = false;
  }
  {
    TimingLogger::ScopedTiming split2("ScanImmuneSpaces", GetTimings());
    for (auto& space : immune_spaces_.GetSpaces()) {
      DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
      accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      ImmuneSpaceScanObjVisitor visitor(this);
      if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects && table != nullptr) {
        table->VisitObjects(ImmuneSpaceScanObjVisitor::Callback, &visitor);
      } else {
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->Limit()),
                                      visitor);
      }
    }
  }
  if (kUseBakerReadBarrier) {
    // This release fence makes the field updates in the above loop visible before allowing
    // mutators to access immune objects without graying them first.
    updated_all_immune_objects_.StoreRelease(true);
    // Now whiten immune objects concurrently accessed and grayed by mutators. We can't do this in
    // the above loop because we would incorrectly disable the read barrier by whitening an object
    // which may point to an unscanned, white object, breaking the to-space invariant.
    //
    // Make sure no mutators are in the middle of marking an immune object before whitening immune
    // objects.
    IssueEmptyCheckpoint();
    MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
    if (kVerboseMode) {
      LOG(INFO) << "immune gray stack size=" << immune_gray_stack_.size();
    }
    for (mirror::Object* obj : immune_gray_stack_) {
      DCHECK(obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      bool success = obj->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                      ReadBarrier::WhitePtr());
      DCHECK(success);
    }
    immune_gray_stack_.clear();
  }

  {
    TimingLogger::ScopedTiming split2("VisitConcurrentRoots", GetTimings());
    Runtime::Current()->VisitConcurrentRoots(this, kVisitRootFlagAllRoots);
  }
  {
    // TODO: don't visit the transaction roots if it's not active.
    TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
    Runtime::Current()->VisitNonThreadRoots(this);
  }

  Thread* self = Thread::Current();
  {
    TimingLogger::ScopedTiming split7("ProcessMarkStack", GetTimings());
    // We transition through three mark stack modes (thread-local, shared, GC-exclusive). The
    // primary reasons are that we need to use a checkpoint to process thread-local mark stacks,
    // but after we disable weak ref accesses, we can't use a checkpoint anymore due to a deadlock
    // issue (running threads can potentially block at WaitHoldingLocks), and that once we reach
    // the point where we process weak references, we can avoid using a lock when accessing the GC
    // mark stack, which makes mark stack processing more efficient.

    // Process the mark stack once in the thread local stack mode. This marks most of the live
    // objects, aside from weak ref accesses with read barriers (Reference::GetReferent() and
    // system weaks) that may happen concurrently while we are processing the mark stack and newly
    // mark/gray objects and push refs onto the mark stack.
    ProcessMarkStack();
    // Switch to the shared mark stack mode. That is, revoke and process thread-local mark stacks
    // for the last time before transitioning to the shared mark stack mode, which would process
    // new refs that may have been concurrently pushed onto the mark stack during the
    // ProcessMarkStack() call above. At the same time, disable weak ref accesses using a
    // per-thread flag. It's important to do these together in a single checkpoint so that we can
    // ensure that mutators won't newly gray objects and push new refs onto the mark stack due to
    // weak ref accesses and mutators safely transition to the shared mark stack mode (without
    // leaving unprocessed refs on the thread-local mark stacks), without a race. This is why we
    // use a thread-local weak ref access flag Thread::tls32_.weak_ref_access_enabled_ instead of
    // the global ones.
    SwitchToSharedMarkStackMode();
    CHECK(!self->GetWeakRefAccessEnabled());
    // Now that weak refs accesses are disabled, once we exhaust the shared mark stack again here
    // (which may be non-empty if there were refs found on thread-local mark stacks during the
    // above SwitchToSharedMarkStackMode() call), we won't have new refs to process, that is,
    // mutators (via read barriers) have no way to produce any more refs to process. Marking
    // converges once before we process weak refs below.
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Switch to the GC exclusive mark stack mode so that we can process the mark stack without a
    // lock from this point on.
    SwitchToGcExclusiveMarkStackMode();
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "ProcessReferences";
    }
    // Process weak references. This may produce new refs to process and have them processed via
    // ProcessMarkStack (in the GC exclusive mark stack mode).
    ProcessReferences(self);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks";
    }
    SweepSystemWeaks(self);
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks done";
    }
    // Process the mark stack here one last time because the above SweepSystemWeaks() call may
    // have marked some objects (kept strings alive) as hash_set::Erase() can call the hash
    // function for arbitrary elements in the weak intern table in InternTable::Table::SweepWeaks().
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Re-enable weak ref accesses.
    ReenableWeakRefAccess(self);
    // Free data for class loaders that we unloaded.
    Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
    // Marking is done. Disable marking.
    DisableMarking();
    if (kUseBakerReadBarrier) {
      ProcessFalseGrayStack();
    }
    CheckEmptyMarkStack();
  }

  CHECK(weak_ref_access_enabled_);
  if (kVerboseMode) {
    LOG(INFO) << "GC end of MarkingPhase";
  }
}

void ConcurrentCopying::ReenableWeakRefAccess(Thread* self) {
  if (kVerboseMode) {
    LOG(INFO) << "ReenableWeakRefAccess";
  }
  weak_ref_access_enabled_.StoreRelaxed(true);  // This is for new threads.
  QuasiAtomic::ThreadFenceForConstructor();
  // Iterate all threads (don't need to or can't use a checkpoint) and re-enable weak ref access.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      thread->SetWeakRefAccessEnabled(true);
    }
  }
  // Unblock blocking threads.
  GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
  Runtime::Current()->BroadcastForNewSystemWeaks();
}

class ConcurrentCopying::DisableMarkingCheckpoint : public Closure {
 public:
  explicit DisableMarkingCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // Disable the thread-local is_gc_marking flag.
    // Note a thread that has just started right before this checkpoint may already have this flag
    // set to false, which is OK.
    thread->SetIsGcMarking(false);
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

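// Run DisableMarkingCheckpoint on all threads, then wait on the barrier until every mutator has
// passed through it.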
void ConcurrentCopying::IssueDisableMarkingCheckpoint() {
  Thread* self = Thread::Current();
  DisableMarkingCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions have
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

void ConcurrentCopying::DisableMarking() {
  // Change the global is_marking flag to false. Do a fence before doing a checkpoint to update the
  // thread-local flags so that a new thread starting up will get the correct is_marking flag.
  is_marking_ = false;
  QuasiAtomic::ThreadFenceForConstructor();
  // Use a checkpoint to turn off the thread-local is_gc_marking flags and to ensure no threads are
  // still in the middle of a read barrier which may have a from-space ref cached in a local
  // variable.
  IssueDisableMarkingCheckpoint();
  if (kUseTableLookupReadBarrier) {
    heap_->rb_table_->ClearAll();
    DCHECK(heap_->rb_table_->IsAllCleared());
  }
  is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(1);
  mark_stack_mode_.StoreSequentiallyConsistent(kMarkStackModeOff);
}

void ConcurrentCopying::PushOntoFalseGrayStack(mirror::Object* ref) {
  CHECK(kUseBakerReadBarrier);
  DCHECK(ref != nullptr);
  MutexLock mu(Thread::Current(), mark_stack_lock_);
  false_gray_stack_.push_back(ref);
}

void ConcurrentCopying::ProcessFalseGrayStack() {
  CHECK(kUseBakerReadBarrier);
  // Change the objects on the false gray stack from gray to white.
  MutexLock mu(Thread::Current(), mark_stack_lock_);
  for (mirror::Object* obj : false_gray_stack_) {
    DCHECK(IsMarked(obj));
    // The object could be white here if a thread got preempted after a success at the
    // AtomicSetReadBarrierPointer in Mark(), GC started marking through it (but not finished so
    // still gray), and the thread ran to register it onto the false gray stack.
    if (obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
      bool success = obj->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                      ReadBarrier::WhitePtr());
      DCHECK(success);
    }
  }
  false_gray_stack_.clear();
}


void ConcurrentCopying::IssueEmptyCheckpoint() {
  Thread* self = Thread::Current();
  EmptyCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions have
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

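// Double the capacity of the GC mark stack, preserving its current contents.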
void ConcurrentCopying::ExpandGcMarkStack() {
  DCHECK(gc_mark_stack_->IsFull());
  const size_t new_size = gc_mark_stack_->Capacity() * 2;
  std::vector<StackReference<mirror::Object>> temp(gc_mark_stack_->Begin(),
                                                   gc_mark_stack_->End());
  gc_mark_stack_->Resize(new_size);
  for (auto& ref : temp) {
    gc_mark_stack_->PushBack(ref.AsMirrorPtr());
  }
  DCHECK(!gc_mark_stack_->IsFull());
}

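// Push a reference onto the mark stack appropriate for the current mark stack mode: in
// thread-local mode the GC thread uses gc_mark_stack_ and mutators use per-thread stacks drawn
// from a pool; in shared mode gc_mark_stack_ is accessed under mark_stack_lock_; in GC-exclusive
// mode only the GC thread may push, so no lock is taken.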
void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
  CHECK_EQ(is_mark_stack_push_disallowed_.LoadRelaxed(), 0)
      << " " << to_ref << " " << PrettyTypeOf(to_ref);
  Thread* self = Thread::Current();  // TODO: pass self as an argument from call sites?
  CHECK(thread_running_gc_ != nullptr);
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (LIKELY(mark_stack_mode == kMarkStackModeThreadLocal)) {
    if (LIKELY(self == thread_running_gc_)) {
      // If GC-running thread, use the GC mark stack instead of a thread-local mark stack.
      CHECK(self->GetThreadLocalMarkStack() == nullptr);
      if (UNLIKELY(gc_mark_stack_->IsFull())) {
        ExpandGcMarkStack();
      }
      gc_mark_stack_->PushBack(to_ref);
    } else {
      // Otherwise, use a thread-local mark stack.
      accounting::AtomicStack<mirror::Object>* tl_mark_stack = self->GetThreadLocalMarkStack();
      if (UNLIKELY(tl_mark_stack == nullptr || tl_mark_stack->IsFull())) {
        MutexLock mu(self, mark_stack_lock_);
        // Get a new thread local mark stack.
        accounting::AtomicStack<mirror::Object>* new_tl_mark_stack;
        if (!pooled_mark_stacks_.empty()) {
          // Use a pooled mark stack.
          new_tl_mark_stack = pooled_mark_stacks_.back();
          pooled_mark_stacks_.pop_back();
        } else {
          // None pooled. Create a new one.
          new_tl_mark_stack =
              accounting::AtomicStack<mirror::Object>::Create(
                  "thread local mark stack", 4 * KB, 4 * KB);
        }
        DCHECK(new_tl_mark_stack != nullptr);
        DCHECK(new_tl_mark_stack->IsEmpty());
        new_tl_mark_stack->PushBack(to_ref);
        self->SetThreadLocalMarkStack(new_tl_mark_stack);
        if (tl_mark_stack != nullptr) {
          // Store the old full stack into a vector.
          revoked_mark_stacks_.push_back(tl_mark_stack);
        }
      } else {
        tl_mark_stack->PushBack(to_ref);
      }
    }
  } else if (mark_stack_mode == kMarkStackModeShared) {
    // Access the shared GC mark stack with a lock.
    MutexLock mu(self, mark_stack_lock_);
    if (UNLIKELY(gc_mark_stack_->IsFull())) {
      ExpandGcMarkStack();
    }
    gc_mark_stack_->PushBack(to_ref);
  } else {
    CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
             static_cast<uint32_t>(kMarkStackModeGcExclusive))
        << "ref=" << to_ref
        << " self->gc_marking=" << self->GetIsGcMarking()
        << " cc->is_marking=" << is_marking_;
    CHECK(self == thread_running_gc_)
        << "Only GC-running thread should access the mark stack "
        << "in the GC exclusive mark stack mode";
    // Access the GC mark stack without a lock.
    if (UNLIKELY(gc_mark_stack_->IsFull())) {
      ExpandGcMarkStack();
    }
    gc_mark_stack_->PushBack(to_ref);
  }
}

accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() {
  return heap_->allocation_stack_.get();
}

accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
  return heap_->live_stack_.get();
}

// The following visitors are used to verify that there are no references to the from-space left
// after marking.
class ConcurrentCopying::VerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
 public:
  explicit VerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    if (kUseBakerReadBarrier) {
      CHECK(ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr())
          << "Ref " << ref << " " << PrettyTypeOf(ref)
          << " has non-white rb_ptr " << ref->GetReadBarrierPointer();
    }
  }

  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    DCHECK(root != nullptr);
    operator()(root);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopying::VerifyNoFromSpaceRefsFieldVisitor {
 public:
  explicit VerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    VerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    this->operator()(ref, mirror::Reference::ReferentOffset(), false);
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    VerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(root->AsMirrorPtr());
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopying::VerifyNoFromSpaceRefsObjectVisitor {
 public:
  explicit VerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void *arg)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    VerifyNoFromSpaceRefsFieldVisitor visitor(collector);
    obj->VisitReferences(visitor, visitor);
    if (kUseBakerReadBarrier) {
      CHECK(obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr())
          << "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer();
    }
  }

 private:
  ConcurrentCopying* const collector_;
};

// Verify there are no from-space references left after the marking phase.
void ConcurrentCopying::VerifyNoFromSpaceReferences() {
  Thread* self = Thread::Current();
  DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
  // Verify that all threads have is_gc_marking set to false.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      CHECK(!thread->GetIsGcMarking());
    }
  }
  VerifyNoFromSpaceRefsObjectVisitor visitor(this);
  // Roots.
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    VerifyNoFromSpaceRefsVisitor ref_visitor(this);
    Runtime::Current()->VisitRoots(&ref_visitor);
  }
  // The to-space.
  region_space_->WalkToSpace(VerifyNoFromSpaceRefsObjectVisitor::ObjectCallback, this);
  // Non-moving spaces.
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->GetMarkBitmap()->Visit(visitor);
  }
  // The alloc stack.
  {
    VerifyNoFromSpaceRefsVisitor ref_visitor(this);
    for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
        it < end; ++it) {
      mirror::Object* const obj = it->AsMirrorPtr();
      if (obj != nullptr && obj->GetClass() != nullptr) {
        // TODO: need to call this only if obj is alive?
        ref_visitor(obj);
        visitor(obj);
      }
    }
  }
  // TODO: LOS. But only refs in LOS are classes.
}

1036// The following visitors are used to assert the to-space invariant.
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001037class ConcurrentCopying::AssertToSpaceInvariantRefsVisitor {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001038 public:
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001039 explicit AssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001040 : collector_(collector) {}
1041
1042 void operator()(mirror::Object* ref) const
Mathieu Chartier90443472015-07-16 20:32:27 -07001043 SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001044 if (ref == nullptr) {
1045 // OK.
1046 return;
1047 }
1048 collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
1049 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001050
1051 private:
Mathieu Chartier97509952015-07-13 14:35:43 -07001052 ConcurrentCopying* const collector_;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001053};
1054
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001055class ConcurrentCopying::AssertToSpaceInvariantFieldVisitor {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001056 public:
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001057 explicit AssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001058 : collector_(collector) {}
1059
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001060 void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
Mathieu Chartier90443472015-07-16 20:32:27 -07001061 SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001062 mirror::Object* ref =
1063 obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001064 AssertToSpaceInvariantRefsVisitor visitor(collector_);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001065 visitor(ref);
1066 }
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001067 void operator()(mirror::Class* klass, mirror::Reference* ref ATTRIBUTE_UNUSED) const
Mathieu Chartier90443472015-07-16 20:32:27 -07001068 SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001069 CHECK(klass->IsTypeOfReferenceClass());
1070 }
1071
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001072 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
1073 SHARED_REQUIRES(Locks::mutator_lock_) {
1074 if (!root->IsNull()) {
1075 VisitRoot(root);
1076 }
1077 }
1078
1079 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
1080 SHARED_REQUIRES(Locks::mutator_lock_) {
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001081 AssertToSpaceInvariantRefsVisitor visitor(collector_);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001082 visitor(root->AsMirrorPtr());
1083 }
1084
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001085 private:
Mathieu Chartier97509952015-07-13 14:35:43 -07001086 ConcurrentCopying* const collector_;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001087};
1088
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001089class ConcurrentCopying::AssertToSpaceInvariantObjectVisitor {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001090 public:
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001091 explicit AssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001092 : collector_(collector) {}
1093 void operator()(mirror::Object* obj) const
Mathieu Chartier90443472015-07-16 20:32:27 -07001094 SHARED_REQUIRES(Locks::mutator_lock_) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001095 ObjectCallback(obj, collector_);
1096 }
1097 static void ObjectCallback(mirror::Object* obj, void* arg)
Mathieu Chartier90443472015-07-16 20:32:27 -07001098 SHARED_REQUIRES(Locks::mutator_lock_) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001099 CHECK(obj != nullptr);
1100 ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
1101 space::RegionSpace* region_space = collector->RegionSpace();
1102 CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
1103 collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001104 AssertToSpaceInvariantFieldVisitor visitor(collector);
Mathieu Chartier059ef3d2015-08-18 13:54:21 -07001105 obj->VisitReferences(visitor, visitor);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001106 }
1107
1108 private:
Mathieu Chartier97509952015-07-13 14:35:43 -07001109 ConcurrentCopying* const collector_;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001110};
1111
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001112class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001113 public:
Roland Levillain3887c462015-08-12 18:15:42 +01001114 RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
1115 bool disable_weak_ref_access)
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001116 : concurrent_copying_(concurrent_copying),
1117 disable_weak_ref_access_(disable_weak_ref_access) {
1118 }
1119
1120 virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
1121 // Note: self is not necessarily equal to thread since thread may be suspended.
1122 Thread* self = Thread::Current();
1123 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
1124 << thread->GetState() << " thread " << thread << " self " << self;
1125 // Revoke thread local mark stacks.
1126 accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
1127 if (tl_mark_stack != nullptr) {
1128 MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
1129 concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
1130 thread->SetThreadLocalMarkStack(nullptr);
1131 }
1132 // Disable weak ref access.
1133 if (disable_weak_ref_access_) {
1134 thread->SetWeakRefAccessEnabled(false);
1135 }
1136 // If thread is a running mutator, then act on behalf of the garbage collector.
1137 // See the code in ThreadList::RunCheckpoint.
Mathieu Chartier10d25082015-10-28 18:36:09 -07001138 concurrent_copying_->GetBarrier().Pass(self);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001139 }
1140
1141 private:
1142 ConcurrentCopying* const concurrent_copying_;
1143 const bool disable_weak_ref_access_;
1144};
1145
1146void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access) {
1147 Thread* self = Thread::Current();
1148 RevokeThreadLocalMarkStackCheckpoint check_point(this, disable_weak_ref_access);
1149 ThreadList* thread_list = Runtime::Current()->GetThreadList();
1150 gc_barrier_->Init(self, 0);
1151 size_t barrier_count = thread_list->RunCheckpoint(&check_point);
1152 // If there are no threads to wait for, which implies that all the checkpoint functions
1153 // have finished, then there is no need to release the mutator lock.
1154 if (barrier_count == 0) {
1155 return;
1156 }
1157 Locks::mutator_lock_->SharedUnlock(self);
1158 {
1159 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1160 gc_barrier_->Increment(self, barrier_count);
1161 }
1162 Locks::mutator_lock_->SharedLock(self);
1163}
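// Lifecycle sketch of a thread-local mark stack (descriptive note; step 1 is an
// assumption about code outside this section):
//   1. A mutator obtains a stack from pooled_mark_stacks_ on its first mark-stack push.
//   2. The checkpoint above moves the stack to revoked_mark_stacks_ and clears the
//      thread's slot, optionally disabling weak ref access at the same time.
//   3. ProcessThreadLocalMarkStacks() drains the revoked stacks and returns each one to
//      the pool, or deletes it if the pool already holds kMarkStackPoolSize stacks.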
1164
1165void ConcurrentCopying::RevokeThreadLocalMarkStack(Thread* thread) {
1166 Thread* self = Thread::Current();
1167 CHECK_EQ(self, thread);
1168 accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
1169 if (tl_mark_stack != nullptr) {
1170 CHECK(is_marking_);
1171 MutexLock mu(self, mark_stack_lock_);
1172 revoked_mark_stacks_.push_back(tl_mark_stack);
1173 thread->SetThreadLocalMarkStack(nullptr);
1174 }
1175}
1176
1177void ConcurrentCopying::ProcessMarkStack() {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001178 if (kVerboseMode) {
1179 LOG(INFO) << "ProcessMarkStack. ";
1180 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001181 bool empty_prev = false;
1182 while (true) {
1183 bool empty = ProcessMarkStackOnce();
1184 if (empty_prev && empty) {
1185 // Saw empty mark stack for a second time, done.
1186 break;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001187 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001188 empty_prev = empty;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001189 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001190}
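// Termination note (an interpretation of the loop above): a single empty drain can race
// with concurrent pushes from mutators, so the loop only stops after seeing the mark
// stack empty on two consecutive ProcessMarkStackOnce() calls. Final emptiness is still
// verified separately by CheckEmptyMarkStack() after the mark stack mode switches.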
1191
1192bool ConcurrentCopying::ProcessMarkStackOnce() {
1193 Thread* self = Thread::Current();
1194 CHECK(thread_running_gc_ != nullptr);
1195 CHECK(self == thread_running_gc_);
1196 CHECK(self->GetThreadLocalMarkStack() == nullptr);
1197 size_t count = 0;
1198 MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
1199 if (mark_stack_mode == kMarkStackModeThreadLocal) {
1200 // Process the thread-local mark stacks and the GC mark stack.
1201 count += ProcessThreadLocalMarkStacks(false);
1202 while (!gc_mark_stack_->IsEmpty()) {
1203 mirror::Object* to_ref = gc_mark_stack_->PopBack();
1204 ProcessMarkStackRef(to_ref);
1205 ++count;
1206 }
1207 gc_mark_stack_->Reset();
1208 } else if (mark_stack_mode == kMarkStackModeShared) {
1209 // Process the shared GC mark stack with a lock.
1210 {
1211 MutexLock mu(self, mark_stack_lock_);
1212 CHECK(revoked_mark_stacks_.empty());
1213 }
1214 while (true) {
1215 std::vector<mirror::Object*> refs;
1216 {
1217 // Copy refs while holding the lock. Note the number of refs should be small.
1218 MutexLock mu(self, mark_stack_lock_);
1219 if (gc_mark_stack_->IsEmpty()) {
1220 break;
1221 }
1222 for (StackReference<mirror::Object>* p = gc_mark_stack_->Begin();
1223 p != gc_mark_stack_->End(); ++p) {
1224 refs.push_back(p->AsMirrorPtr());
1225 }
1226 gc_mark_stack_->Reset();
1227 }
1228 for (mirror::Object* ref : refs) {
1229 ProcessMarkStackRef(ref);
1230 ++count;
1231 }
1232 }
1233 } else {
1234 CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
1235 static_cast<uint32_t>(kMarkStackModeGcExclusive));
1236 {
1237 MutexLock mu(self, mark_stack_lock_);
1238 CHECK(revoked_mark_stacks_.empty());
1239 }
1240 // Process the GC mark stack in the exclusive mode. No need to take the lock.
1241 while (!gc_mark_stack_->IsEmpty()) {
1242 mirror::Object* to_ref = gc_mark_stack_->PopBack();
1243 ProcessMarkStackRef(to_ref);
1244 ++count;
1245 }
1246 gc_mark_stack_->Reset();
1247 }
1248
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001249 // Return true if the stack was empty.
1250 return count == 0;
1251}
1252
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001253size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access) {
1254 // Run a checkpoint to collect all thread local mark stacks and iterate over them all.
1255 RevokeThreadLocalMarkStacks(disable_weak_ref_access);
1256 size_t count = 0;
1257 std::vector<accounting::AtomicStack<mirror::Object>*> mark_stacks;
1258 {
1259 MutexLock mu(Thread::Current(), mark_stack_lock_);
1260 // Make a copy of the mark stack vector.
1261 mark_stacks = revoked_mark_stacks_;
1262 revoked_mark_stacks_.clear();
1263 }
1264 for (accounting::AtomicStack<mirror::Object>* mark_stack : mark_stacks) {
1265 for (StackReference<mirror::Object>* p = mark_stack->Begin(); p != mark_stack->End(); ++p) {
1266 mirror::Object* to_ref = p->AsMirrorPtr();
1267 ProcessMarkStackRef(to_ref);
1268 ++count;
1269 }
1270 {
1271 MutexLock mu(Thread::Current(), mark_stack_lock_);
1272 if (pooled_mark_stacks_.size() >= kMarkStackPoolSize) {
1273 // The pool has enough stacks. Delete this one.
1274 delete mark_stack;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001275 } else {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001276 // Otherwise, put it into the pool for later reuse.
1277 mark_stack->Reset();
1278 pooled_mark_stacks_.push_back(mark_stack);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001279 }
1280 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001281 }
1282 return count;
1283}
1284
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001285inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001286 DCHECK(!region_space_->IsInFromSpace(to_ref));
1287 if (kUseBakerReadBarrier) {
1288 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
1289 << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
1290 << " is_marked=" << IsMarked(to_ref);
1291 }
1292 // Scan ref fields.
1293 Scan(to_ref);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001294 if (kUseBakerReadBarrier) {
1295 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
1296 << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
1297 << " is_marked=" << IsMarked(to_ref);
1298 }
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001299#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
1300 if (UNLIKELY((to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
1301 to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr &&
1302 !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())))) {
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001303 // Leave this reference gray in the queue so that GetReferent() will trigger a read barrier. We
1304 // will change it to white later in ReferenceQueue::DequeuePendingReference().
Richard Uhlere3627402016-02-02 13:36:55 -08001305 DCHECK(to_ref->AsReference()->GetPendingNext() != nullptr) << "Left unenqueued ref gray " << to_ref;
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001306 } else {
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001307 // We may occasionally leave a reference white in the queue if its referent happens to be
1308 // concurrently marked after the Scan() call above has enqueued the Reference, in which case the
1309 // above IsInToSpace() evaluates to true and we change the color from gray to white here in this
1310 // else block.
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001311 if (kUseBakerReadBarrier) {
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001312 bool success = to_ref->AtomicSetReadBarrierPointer</*kCasRelease*/true>(
1313 ReadBarrier::GrayPtr(),
1314 ReadBarrier::WhitePtr());
1315 DCHECK(success) << "Must succeed as we won the race.";
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001316 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001317 }
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001318#else
1319 DCHECK(!kUseBakerReadBarrier);
1320#endif
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001321
1322 if (region_space_->IsInUnevacFromSpace(to_ref)) {
1323 // Add to the live bytes of the unevacuated from-space. Note this code is always run by the
1324 // GC-running thread (no synchronization required).
1325 DCHECK(region_space_bitmap_->Test(to_ref));
1326 // Disable the read barrier in SizeOf for performance, which is safe.
1327 size_t obj_size = to_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
1328 size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
1329 region_space_->AddLiveBytes(to_ref, alloc_size);
1330 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001331 if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001332 AssertToSpaceInvariantObjectVisitor visitor(this);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001333 visitor(to_ref);
1334 }
1335}
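// Baker color protocol sketch (a summary of the code above): gray means "on a mark
// stack or referent not yet known reachable", so mutator read barriers take the slow
// path; white means fully scanned. References with unmarked referents are left gray so
// that GetReferent() keeps triggering a read barrier until
// ReferenceQueue::DequeuePendingReference() whitens them; everything else is released
// from gray to white with the CAS above.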
1336
1337void ConcurrentCopying::SwitchToSharedMarkStackMode() {
1338 Thread* self = Thread::Current();
1339 CHECK(thread_running_gc_ != nullptr);
1340 CHECK_EQ(self, thread_running_gc_);
1341 CHECK(self->GetThreadLocalMarkStack() == nullptr);
1342 MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
1343 CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
1344 static_cast<uint32_t>(kMarkStackModeThreadLocal));
1345 mark_stack_mode_.StoreRelaxed(kMarkStackModeShared);
1346 CHECK(weak_ref_access_enabled_.LoadRelaxed());
1347 weak_ref_access_enabled_.StoreRelaxed(false);
1348 QuasiAtomic::ThreadFenceForConstructor();
1349 // Process the thread local mark stacks one last time after switching to the shared mark stack
1350 // mode and disabling weak ref access.
1351 ProcessThreadLocalMarkStacks(true);
1352 if (kVerboseMode) {
1353 LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access";
1354 }
1355}
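// Mark stack mode ladder (summary): kMarkStackModeThreadLocal -> kMarkStackModeShared
// -> kMarkStackModeGcExclusive (-> off). The switch to shared mode also disables weak
// ref access, so from here on mutators that read weak references are expected to wait
// until the GC re-enables access.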
1356
1357void ConcurrentCopying::SwitchToGcExclusiveMarkStackMode() {
1358 Thread* self = Thread::Current();
1359 CHECK(thread_running_gc_ != nullptr);
1360 CHECK_EQ(self, thread_running_gc_);
1361 CHECK(self->GetThreadLocalMarkStack() == nullptr);
1362 MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
1363 CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
1364 static_cast<uint32_t>(kMarkStackModeShared));
1365 mark_stack_mode_.StoreRelaxed(kMarkStackModeGcExclusive);
1366 QuasiAtomic::ThreadFenceForConstructor();
1367 if (kVerboseMode) {
1368 LOG(INFO) << "Switched to GC exclusive mark stack mode";
1369 }
1370}
1371
1372void ConcurrentCopying::CheckEmptyMarkStack() {
1373 Thread* self = Thread::Current();
1374 CHECK(thread_running_gc_ != nullptr);
1375 CHECK_EQ(self, thread_running_gc_);
1376 CHECK(self->GetThreadLocalMarkStack() == nullptr);
1377 MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
1378 if (mark_stack_mode == kMarkStackModeThreadLocal) {
1379 // Thread-local mark stack mode.
1380 RevokeThreadLocalMarkStacks(false);
1381 MutexLock mu(Thread::Current(), mark_stack_lock_);
1382 if (!revoked_mark_stacks_.empty()) {
1383 for (accounting::AtomicStack<mirror::Object>* mark_stack : revoked_mark_stacks_) {
1384 while (!mark_stack->IsEmpty()) {
1385 mirror::Object* obj = mark_stack->PopBack();
1386 if (kUseBakerReadBarrier) {
1387 mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
1388 LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj) << " rb_ptr=" << rb_ptr
1389 << " is_marked=" << IsMarked(obj);
1390 } else {
1391 LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj)
1392 << " is_marked=" << IsMarked(obj);
1393 }
1394 }
1395 }
1396 LOG(FATAL) << "mark stack is not empty";
1397 }
1398 } else {
1399 // Shared, GC-exclusive, or off.
1400 MutexLock mu(Thread::Current(), mark_stack_lock_);
1401 CHECK(gc_mark_stack_->IsEmpty());
1402 CHECK(revoked_mark_stacks_.empty());
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001403 }
1404}
1405
1406void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
1407 TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
1408 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
Mathieu Chartier97509952015-07-13 14:35:43 -07001409 Runtime::Current()->SweepSystemWeaks(this);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001410}
1411
1412void ConcurrentCopying::Sweep(bool swap_bitmaps) {
1413 {
1414 TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
1415 accounting::ObjectStack* live_stack = heap_->GetLiveStack();
1416 if (kEnableFromSpaceAccountingCheck) {
1417 CHECK_GE(live_stack_freeze_size_, live_stack->Size());
1418 }
1419 heap_->MarkAllocStackAsLive(live_stack);
1420 live_stack->Reset();
1421 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001422 CheckEmptyMarkStack();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001423 TimingLogger::ScopedTiming split("Sweep", GetTimings());
1424 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
1425 if (space->IsContinuousMemMapAllocSpace()) {
1426 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
Mathieu Chartier763a31e2015-11-16 16:05:55 -08001427 if (space == region_space_ || immune_spaces_.ContainsSpace(space)) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001428 continue;
1429 }
1430 TimingLogger::ScopedTiming split2(
1431 alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
1432 RecordFree(alloc_space->Sweep(swap_bitmaps));
1433 }
1434 }
1435 SweepLargeObjects(swap_bitmaps);
1436}
1437
1438void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) {
1439 TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
1440 RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
1441}
1442
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001443void ConcurrentCopying::ReclaimPhase() {
1444 TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings());
1445 if (kVerboseMode) {
1446 LOG(INFO) << "GC ReclaimPhase";
1447 }
1448 Thread* self = Thread::Current();
1449
1450 {
1451 // Double-check that the mark stack is empty.
1452 // Note: need to set this after VerifyNoFromSpaceRef().
1453 is_asserting_to_space_invariant_ = false;
1454 QuasiAtomic::ThreadFenceForConstructor();
1455 if (kVerboseMode) {
1456 LOG(INFO) << "Issue an empty checkpoint.";
1457 }
1458 IssueEmptyCheckpoint();
1459 // Disable the check.
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001460 is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(0);
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001461 if (kUseBakerReadBarrier) {
1462 updated_all_immune_objects_.StoreSequentiallyConsistent(false);
1463 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001464 CheckEmptyMarkStack();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001465 }
1466
1467 {
1468 // Record freed objects.
1469 TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
1470 // Don't include thread-locals that are in the to-space.
1471 uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
1472 uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
1473 uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
1474 uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
1475 uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
1476 uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
1477 if (kEnableFromSpaceAccountingCheck) {
1478 CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
1479 CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
1480 }
1481 CHECK_LE(to_objects, from_objects);
1482 CHECK_LE(to_bytes, from_bytes);
1483 int64_t freed_bytes = from_bytes - to_bytes;
1484 int64_t freed_objects = from_objects - to_objects;
1485 if (kVerboseMode) {
1486 LOG(INFO) << "RecordFree:"
1487 << " from_bytes=" << from_bytes << " from_objects=" << from_objects
1488 << " unevac_from_bytes=" << unevac_from_bytes << " unevac_from_objects=" << unevac_from_objects
1489 << " to_bytes=" << to_bytes << " to_objects=" << to_objects
1490 << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects
1491 << " from_space size=" << region_space_->FromSpaceSize()
1492 << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
1493 << " to_space size=" << region_space_->ToSpaceSize();
1494 LOG(INFO) << "(before) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
1495 }
1496 RecordFree(ObjectBytePair(freed_objects, freed_bytes));
1497 if (kVerboseMode) {
1498 LOG(INFO) << "(after) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
1499 }
1500 }
1501
1502 {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001503 TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
1504 region_space_->ClearFromSpace();
1505 }
1506
1507 {
1508 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001509 Sweep(false);
1510 SwapBitmaps();
1511 heap_->UnBindBitmaps();
1512
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001513 // Delete the region bitmap.
1514 DCHECK(region_space_bitmap_ != nullptr);
1515 delete region_space_bitmap_;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001516 region_space_bitmap_ = nullptr;
1517 }
1518
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001519 CheckEmptyMarkStack();
1520
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001521 if (kVerboseMode) {
1522 LOG(INFO) << "GC end of ReclaimPhase";
1523 }
1524}
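// Accounting sketch with illustrative numbers: if from_bytes = 8MB was allocated in
// evacuated from-space regions and the survivors compacted into to_bytes = 3MB, then
// RecordFree() records 5MB and (from_objects - to_objects) objects freed. Unevacuated
// from-space bytes are tracked separately (unevac_from_bytes), so objects that stayed
// in place are not counted as freed.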
1525
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001526// Assert the to-space invariant.
1527void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
1528 mirror::Object* ref) {
1529 CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
1530 if (is_asserting_to_space_invariant_) {
1531 if (region_space_->IsInToSpace(ref)) {
1532 // OK.
1533 return;
1534 } else if (region_space_->IsInUnevacFromSpace(ref)) {
1535 CHECK(region_space_bitmap_->Test(ref)) << ref;
1536 } else if (region_space_->IsInFromSpace(ref)) {
1537 // Not OK. Do extra logging.
1538 if (obj != nullptr) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001539 LogFromSpaceRefHolder(obj, offset);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001540 }
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001541 ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001542 CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
1543 } else {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001544 AssertToSpaceInvariantInNonMovingSpace(obj, ref);
1545 }
1546 }
1547}
1548
1549class RootPrinter {
1550 public:
1551 RootPrinter() { }
1552
1553 template <class MirrorType>
1554 ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
Mathieu Chartier90443472015-07-16 20:32:27 -07001555 SHARED_REQUIRES(Locks::mutator_lock_) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001556 if (!root->IsNull()) {
1557 VisitRoot(root);
1558 }
1559 }
1560
1561 template <class MirrorType>
1562 void VisitRoot(mirror::Object** root)
Mathieu Chartier90443472015-07-16 20:32:27 -07001563 SHARED_REQUIRES(Locks::mutator_lock_) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001564 LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << *root;
1565 }
1566
1567 template <class MirrorType>
1568 void VisitRoot(mirror::CompressedReference<MirrorType>* root)
Mathieu Chartier90443472015-07-16 20:32:27 -07001569 SHARED_REQUIRES(Locks::mutator_lock_) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001570 LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << root->AsMirrorPtr();
1571 }
1572};
1573
1574void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source,
1575 mirror::Object* ref) {
1576 CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
1577 if (is_asserting_to_space_invariant_) {
1578 if (region_space_->IsInToSpace(ref)) {
1579 // OK.
1580 return;
1581 } else if (region_space_->IsInUnevacFromSpace(ref)) {
1582 CHECK(region_space_bitmap_->Test(ref)) << ref;
1583 } else if (region_space_->IsInFromSpace(ref)) {
1584 // Not OK. Do extra logging.
1585 if (gc_root_source == nullptr) {
1586 // No info.
1587 } else if (gc_root_source->HasArtField()) {
1588 ArtField* field = gc_root_source->GetArtField();
1589 LOG(INTERNAL_FATAL) << "gc root in field " << field << " " << PrettyField(field);
1590 RootPrinter root_printer;
1591 field->VisitRoots(root_printer);
1592 } else if (gc_root_source->HasArtMethod()) {
1593 ArtMethod* method = gc_root_source->GetArtMethod();
1594 LOG(INTERNAL_FATAL) << "gc root in method " << method << " " << PrettyMethod(method);
1595 RootPrinter root_printer;
Andreas Gampe542451c2016-07-26 09:02:02 -07001596 method->VisitRoots(root_printer, kRuntimePointerSize);
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001597 }
1598 ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
1599 region_space_->DumpNonFreeRegions(LOG(INTERNAL_FATAL));
1600 PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
1601 MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
1602 CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
1603 } else {
1604 AssertToSpaceInvariantInNonMovingSpace(nullptr, ref);
1605 }
1606 }
1607}
1608
1609void ConcurrentCopying::LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) {
1610 if (kUseBakerReadBarrier) {
1611 LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj)
1612 << " holder rb_ptr=" << obj->GetReadBarrierPointer();
1613 } else {
1614 LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj);
1615 }
1616 if (region_space_->IsInFromSpace(obj)) {
1617 LOG(INFO) << "holder is in the from-space.";
1618 } else if (region_space_->IsInToSpace(obj)) {
1619 LOG(INFO) << "holder is in the to-space.";
1620 } else if (region_space_->IsInUnevacFromSpace(obj)) {
1621 LOG(INFO) << "holder is in the unevac from-space.";
1622 if (region_space_bitmap_->Test(obj)) {
1623 LOG(INFO) << "holder is marked in the region space bitmap.";
1624 } else {
1625 LOG(INFO) << "holder is not marked in the region space bitmap.";
1626 }
1627 } else {
1628 // In a non-moving space.
Mathieu Chartier763a31e2015-11-16 16:05:55 -08001629 if (immune_spaces_.ContainsObject(obj)) {
1630 LOG(INFO) << "holder is in an immune image or the zygote space.";
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001631 } else {
Mathieu Chartier763a31e2015-11-16 16:05:55 -08001632 LOG(INFO) << "holder is in a non-immune, non-moving (or main) space.";
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001633 accounting::ContinuousSpaceBitmap* mark_bitmap =
1634 heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
1635 accounting::LargeObjectBitmap* los_bitmap =
1636 heap_mark_bitmap_->GetLargeObjectBitmap(obj);
1637 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1638 bool is_los = mark_bitmap == nullptr;
1639 if (!is_los && mark_bitmap->Test(obj)) {
1640 LOG(INFO) << "holder is marked in the mark bit map.";
1641 } else if (is_los && los_bitmap->Test(obj)) {
1642 LOG(INFO) << "holder is marked in the los bit map.";
1643 } else {
1644 // If ref is on the allocation stack, then it is considered
1645 // marked/alive (but not necessarily on the live stack).
1646 if (IsOnAllocStack(obj)) {
1647 LOG(INFO) << "holder is on the alloc stack.";
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001648 } else {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001649 LOG(INFO) << "holder is not marked or on the alloc stack.";
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001650 }
1651 }
1652 }
1653 }
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001654 LOG(INFO) << "offset=" << offset.SizeValue();
1655}
1656
1657void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj,
1658 mirror::Object* ref) {
1659 // In a non-moving space. Check that the ref is marked.
Mathieu Chartier763a31e2015-11-16 16:05:55 -08001660 if (immune_spaces_.ContainsObject(ref)) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001661 if (kUseBakerReadBarrier) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001662 // An immune object may not be gray if called from the GC while immune-object graying is disabled.
1663 if (Thread::Current() == thread_running_gc_ && !gc_grays_immune_objects_) {
1664 return;
1665 }
1666 bool updated_all_immune_objects = updated_all_immune_objects_.LoadSequentiallyConsistent();
1667 CHECK(updated_all_immune_objects || ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001668 << "Unmarked immune space ref. obj=" << obj << " rb_ptr="
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001669 << (obj != nullptr ? obj->GetReadBarrierPointer() : nullptr)
1670 << " ref=" << ref << " ref rb_ptr=" << ref->GetReadBarrierPointer()
1671 << " updated_all_immune_objects=" << updated_all_immune_objects;
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001672 }
1673 } else {
1674 accounting::ContinuousSpaceBitmap* mark_bitmap =
1675 heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
1676 accounting::LargeObjectBitmap* los_bitmap =
1677 heap_mark_bitmap_->GetLargeObjectBitmap(ref);
1678 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1679 bool is_los = mark_bitmap == nullptr;
1680 if ((!is_los && mark_bitmap->Test(ref)) ||
1681 (is_los && los_bitmap->Test(ref))) {
1682 // OK.
1683 } else {
1684 // If ref is on the allocation stack, then it may not be
1685 // marked live, but considered marked/alive (but not
1686 // necessarily on the live stack).
1687 CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. "
1688 << "obj=" << obj << " ref=" << ref;
1689 }
1690 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001691}
1692
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001693// Used to scan ref fields of an object.
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001694class ConcurrentCopying::RefFieldsVisitor {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001695 public:
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001696 explicit RefFieldsVisitor(ConcurrentCopying* collector)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001697 : collector_(collector) {}
1698
1699 void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
Mathieu Chartier90443472015-07-16 20:32:27 -07001700 const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_)
1701 SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001702 collector_->Process(obj, offset);
1703 }
1704
1705 void operator()(mirror::Class* klass, mirror::Reference* ref) const
Mathieu Chartier90443472015-07-16 20:32:27 -07001706 SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001707 CHECK(klass->IsTypeOfReferenceClass());
1708 collector_->DelayReferenceReferent(klass, ref);
1709 }
1710
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001711 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001712 ALWAYS_INLINE
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001713 SHARED_REQUIRES(Locks::mutator_lock_) {
1714 if (!root->IsNull()) {
1715 VisitRoot(root);
1716 }
1717 }
1718
1719 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001720 ALWAYS_INLINE
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001721 SHARED_REQUIRES(Locks::mutator_lock_) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001722 collector_->MarkRoot</*kGrayImmuneObject*/false>(root);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001723 }
1724
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001725 private:
1726 ConcurrentCopying* const collector_;
1727};
1728
1729// Scan ref fields of an object.
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001730inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
Mathieu Chartierd6636d32016-07-28 11:02:38 -07001731 if (kDisallowReadBarrierDuringScan) {
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07001732 // Avoid all read barriers while visiting references to help performance.
1733 Thread::Current()->ModifyDebugDisallowReadBarrier(1);
1734 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001735 DCHECK(!region_space_->IsInFromSpace(to_ref));
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001736 DCHECK_EQ(Thread::Current(), thread_running_gc_);
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001737 RefFieldsVisitor visitor(this);
Hiroshi Yamauchi5496f692016-02-17 13:29:59 -08001738 // Disable the read barrier for performance reasons.
1739 to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
1740 visitor, visitor);
Mathieu Chartierd6636d32016-07-28 11:02:38 -07001741 if (kDisallowReadBarrierDuringScan) {
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07001742 Thread::Current()->ModifyDebugDisallowReadBarrier(-1);
1743 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001744}
1745
1746// Process a field.
1747inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001748 DCHECK_EQ(Thread::Current(), thread_running_gc_);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001749 mirror::Object* ref = obj->GetFieldObject<
1750 mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001751 mirror::Object* to_ref = Mark</*kGrayImmuneObject*/false>(ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001752 if (to_ref == ref) {
1753 return;
1754 }
1755 // This may fail if the mutator writes to the field at the same time. But it's ok.
1756 mirror::Object* expected_ref = ref;
1757 mirror::Object* new_ref = to_ref;
1758 do {
1759 if (expected_ref !=
1760 obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) {
1761 // It was updated by the mutator.
1762 break;
1763 }
Hiroshi Yamauchifed3e2f2015-10-20 11:11:56 -07001764 } while (!obj->CasFieldWeakRelaxedObjectWithoutWriteBarrier<
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001765 false, false, kVerifyNone>(offset, expected_ref, new_ref));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001766}
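// Illustrative sketch of the read-check-CAS retry pattern used above (names are for
// illustration only, not ART API):
//
//   mirror::Object* expected = ref;
//   do {
//     if (slot.LoadRelaxed() != expected) {
//       break;  // A mutator already updated the slot; its value wins.
//     }
//   } while (!slot.CompareExchangeWeakRelaxed(expected, to_ref));
//
// The weak CAS may fail spuriously, hence the loop; losing to a mutator is fine since,
// after the flip, mutator writes are expected to store to-space references already.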
1767
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001768// Process some roots.
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001769inline void ConcurrentCopying::VisitRoots(
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001770 mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
1771 for (size_t i = 0; i < count; ++i) {
1772 mirror::Object** root = roots[i];
1773 mirror::Object* ref = *root;
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001774 mirror::Object* to_ref = Mark(ref);
1775 if (to_ref == ref) {
Mathieu Chartier4809d0a2015-04-07 10:39:04 -07001776 continue;
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001777 }
1778 Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
1779 mirror::Object* expected_ref = ref;
1780 mirror::Object* new_ref = to_ref;
1781 do {
1782 if (expected_ref != addr->LoadRelaxed()) {
1783 // It was updated by the mutator.
1784 break;
1785 }
Hiroshi Yamauchifed3e2f2015-10-20 11:11:56 -07001786 } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001787 }
1788}
1789
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001790template<bool kGrayImmuneObject>
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001791inline void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) {
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001792 DCHECK(!root->IsNull());
1793 mirror::Object* const ref = root->AsMirrorPtr();
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001794 mirror::Object* to_ref = Mark<kGrayImmuneObject>(ref);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001795 if (to_ref != ref) {
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001796 auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
1797 auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
1798 auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001799 // If the cas fails, then it was updated by the mutator.
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001800 do {
1801 if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
1802 // It was updated by the mutator.
1803 break;
1804 }
Hiroshi Yamauchifed3e2f2015-10-20 11:11:56 -07001805 } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001806 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001807}
1808
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001809inline void ConcurrentCopying::VisitRoots(
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001810 mirror::CompressedReference<mirror::Object>** roots, size_t count,
1811 const RootInfo& info ATTRIBUTE_UNUSED) {
1812 for (size_t i = 0; i < count; ++i) {
1813 mirror::CompressedReference<mirror::Object>* const root = roots[i];
1814 if (!root->IsNull()) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001815 // kGrayImmuneObject is true because this is used for the thread flip.
1816 MarkRoot</*kGrayImmuneObject*/true>(root);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001817 }
1818 }
1819}
1820
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001821 // Temporarily sets gc_grays_immune_objects_ to true within a scope if the current thread is the GC.
1822class ConcurrentCopying::ScopedGcGraysImmuneObjects {
1823 public:
1824 explicit ScopedGcGraysImmuneObjects(ConcurrentCopying* collector)
1825 : collector_(collector), enabled_(false) {
1826 if (kUseBakerReadBarrier &&
1827 collector_->thread_running_gc_ == Thread::Current() &&
1828 !collector_->gc_grays_immune_objects_) {
1829 collector_->gc_grays_immune_objects_ = true;
1830 enabled_ = true;
1831 }
1832 }
1833
1834 ~ScopedGcGraysImmuneObjects() {
1835 if (kUseBakerReadBarrier &&
1836 collector_->thread_running_gc_ == Thread::Current() &&
1837 enabled_) {
1838 DCHECK(collector_->gc_grays_immune_objects_);
1839 collector_->gc_grays_immune_objects_ = false;
1840 }
1841 }
1842
1843 private:
1844 ConcurrentCopying* const collector_;
1845 bool enabled_;
1846};
1847
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001848 // Fill the given memory block with a dummy object. Used to fill in copies
1849 // of objects that were lost in the race.
1850void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001851 // GC doesn't gray immune objects while scanning immune objects. But we need to trigger the read
1852 // barriers here because we need the updated reference to the int array class, etc. Temporarily set
1853 // gc_grays_immune_objects_ to true so that we won't cause a DCHECK failure in MarkImmuneSpace().
1854 ScopedGcGraysImmuneObjects scoped_gc_gray_immune_objects(this);
Roland Levillain14d90572015-07-16 10:52:26 +01001855 CHECK_ALIGNED(byte_size, kObjectAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001856 memset(dummy_obj, 0, byte_size);
Mathieu Chartierd6636d32016-07-28 11:02:38 -07001857 // Avoid going through the read barrier since kDisallowReadBarrierDuringScan may be enabled.
1858 // Explicitly mark to make sure to get an object in the to-space.
1859 mirror::Class* int_array_class = down_cast<mirror::Class*>(
1860 Mark(mirror::IntArray::GetArrayClass<kWithoutReadBarrier>()));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001861 CHECK(int_array_class != nullptr);
1862 AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07001863 size_t component_size = int_array_class->GetComponentSize<kWithoutReadBarrier>();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001864 CHECK_EQ(component_size, sizeof(int32_t));
1865 size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
1866 if (data_offset > byte_size) {
1867 // An int array is too big. Use java.lang.Object.
1868 mirror::Class* java_lang_Object = WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object);
1869 AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object);
Mathieu Chartierd6636d32016-07-28 11:02:38 -07001870 CHECK_EQ(byte_size, (java_lang_Object->GetObjectSize<kVerifyNone, kWithoutReadBarrier>()));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001871 dummy_obj->SetClass(java_lang_Object);
Mathieu Chartierd6636d32016-07-28 11:02:38 -07001872 CHECK_EQ(byte_size, (dummy_obj->SizeOf<kVerifyNone, kWithoutReadBarrier>()));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001873 } else {
1874 // Use an int array.
1875 dummy_obj->SetClass(int_array_class);
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07001876 CHECK((dummy_obj->IsArrayInstance<kVerifyNone, kWithoutReadBarrier>()));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001877 int32_t length = (byte_size - data_offset) / component_size;
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07001878 mirror::Array* dummy_arr = dummy_obj->AsArray<kVerifyNone, kWithoutReadBarrier>();
1879 dummy_arr->SetLength(length);
1880 CHECK_EQ(dummy_arr->GetLength(), length)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001881 << "byte_size=" << byte_size << " length=" << length
1882 << " component_size=" << component_size << " data_offset=" << data_offset;
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07001883 CHECK_EQ(byte_size, (dummy_obj->SizeOf<kVerifyNone, kWithoutReadBarrier>()))
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001884 << "byte_size=" << byte_size << " length=" << length
1885 << " component_size=" << component_size << " data_offset=" << data_offset;
1886 }
1887}
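// Size sketch with illustrative numbers (actual header sizes vary by configuration):
// with a 4-byte component and a 16-byte array data offset, a 32-byte hole becomes an
// int[4], since length = (32 - 16) / 4; a hole smaller than the array data offset must
// exactly fit a plain java.lang.Object instead, as the CHECK_EQ above enforces.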
1888
1889 // Reuse the memory blocks that were copies of objects lost in the race.
1890mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
1891 // Try to reuse the blocks that were unused due to CAS failures.
Roland Levillain14d90572015-07-16 10:52:26 +01001892 CHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001893 Thread* self = Thread::Current();
1894 size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
Mathieu Chartierd6636d32016-07-28 11:02:38 -07001895 size_t byte_size;
1896 uint8_t* addr;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001897 {
Mathieu Chartierd6636d32016-07-28 11:02:38 -07001898 MutexLock mu(self, skipped_blocks_lock_);
1899 auto it = skipped_blocks_map_.lower_bound(alloc_size);
1900 if (it == skipped_blocks_map_.end()) {
1901 // Not found.
1902 return nullptr;
1903 }
1904 byte_size = it->first;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001905 CHECK_GE(byte_size, alloc_size);
1906 if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
1907 // If remainder would be too small for a dummy object, retry with a larger request size.
1908 it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
1909 if (it == skipped_blocks_map_.end()) {
1910 // Not found.
1911 return nullptr;
1912 }
Roland Levillain14d90572015-07-16 10:52:26 +01001913 CHECK_ALIGNED(it->first - alloc_size, space::RegionSpace::kAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001914 CHECK_GE(it->first - alloc_size, min_object_size)
1915 << "byte_size=" << byte_size << " it->first=" << it->first << " alloc_size=" << alloc_size;
1916 }
Mathieu Chartierd6636d32016-07-28 11:02:38 -07001917 // Found a block.
1918 CHECK(it != skipped_blocks_map_.end());
1919 byte_size = it->first;
1920 addr = it->second;
1921 CHECK_GE(byte_size, alloc_size);
1922 CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
1923 CHECK_ALIGNED(byte_size, space::RegionSpace::kAlignment);
1924 if (kVerboseMode) {
1925 LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
1926 }
1927 skipped_blocks_map_.erase(it);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001928 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001929 memset(addr, 0, byte_size);
1930 if (byte_size > alloc_size) {
1931 // Return the remainder to the map.
Roland Levillain14d90572015-07-16 10:52:26 +01001932 CHECK_ALIGNED(byte_size - alloc_size, space::RegionSpace::kAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001933 CHECK_GE(byte_size - alloc_size, min_object_size);
Mathieu Chartierd6636d32016-07-28 11:02:38 -07001934 // FillWithDummyObject may mark an object, so avoid holding skipped_blocks_lock_ to prevent a
1935 // lock order violation and possible deadlock. The deadlock case is recursive:
1936 // FillWithDummyObject -> IntArray::GetArrayClass -> Mark -> Copy -> AllocateInSkippedBlock.
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001937 FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
1938 byte_size - alloc_size);
1939 CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
Mathieu Chartierd6636d32016-07-28 11:02:38 -07001940 {
1941 MutexLock mu(self, skipped_blocks_lock_);
1942 skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
1943 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001944 }
1945 return reinterpret_cast<mirror::Object*>(addr);
1946}
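// Best-fit sketch for skipped_blocks_map_ (byte size -> block address), with
// illustrative numbers: with 48- and 64-byte blocks pooled and alloc_size = 40,
// lower_bound(40) finds the 48-byte block, but the 8-byte remainder is too small to
// hold a dummy object, so the search retries at 40 + min_object_size and takes the
// 64-byte block, filling its tail and returning it to the map. min_object_size depends
// on the object header size.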
1947
1948mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) {
1949 DCHECK(region_space_->IsInFromSpace(from_ref));
1950 // No read barrier to avoid nested RB that might violate the to-space
1951 // invariant. Note that from_ref is a from space ref so the SizeOf()
1952 // call will access the from-space meta objects, but it's ok and necessary.
1953 size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
1954 size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
1955 size_t region_space_bytes_allocated = 0U;
1956 size_t non_moving_space_bytes_allocated = 0U;
1957 size_t bytes_allocated = 0U;
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001958 size_t dummy;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001959 mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001960 region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001961 bytes_allocated = region_space_bytes_allocated;
1962 if (to_ref != nullptr) {
1963 DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
1964 }
1965 bool fall_back_to_non_moving = false;
1966 if (UNLIKELY(to_ref == nullptr)) {
1967 // Failed to allocate in the region space. Try the skipped blocks.
1968 to_ref = AllocateInSkippedBlock(region_space_alloc_size);
1969 if (to_ref != nullptr) {
1970 // Succeeded to allocate in a skipped block.
1971 if (heap_->use_tlab_) {
1972 // This is necessary for the TLAB case as the allocation is not accounted for in the space.
1973 region_space_->RecordAlloc(to_ref);
1974 }
1975 bytes_allocated = region_space_alloc_size;
1976 } else {
1977 // Fall back to the non-moving space.
1978 fall_back_to_non_moving = true;
1979 if (kVerboseMode) {
1980 LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
1981 << to_space_bytes_skipped_.LoadSequentiallyConsistent()
1982 << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent();
1983 }
1985 to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001986 &non_moving_space_bytes_allocated, nullptr, &dummy);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001987 CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed";
1988 bytes_allocated = non_moving_space_bytes_allocated;
1989 // Mark it in the mark bitmap.
1990 accounting::ContinuousSpaceBitmap* mark_bitmap =
1991 heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
1992 CHECK(mark_bitmap != nullptr);
1993 CHECK(!mark_bitmap->AtomicTestAndSet(to_ref));
1994 }
1995 }
1996 DCHECK(to_ref != nullptr);
1997
1998 // Attempt to install the forward pointer. This is in a loop as the
1999 // lock word atomic write can fail.
2000 while (true) {
2001 // Copy the object. TODO: copy only the lockword in the second iteration and on?
2002 memcpy(to_ref, from_ref, obj_size);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002003
2004 LockWord old_lock_word = to_ref->GetLockWord(false);
2005
2006 if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
2007 // Lost the race. Another thread (either GC or mutator) stored
2008 // the forwarding pointer first. Make the lost copy (to_ref)
2009 // look like a valid but dead (dummy) object and keep it for
2010 // future reuse.
2011 FillWithDummyObject(to_ref, bytes_allocated);
2012 if (!fall_back_to_non_moving) {
2013 DCHECK(region_space_->IsInToSpace(to_ref));
2014 if (bytes_allocated > space::RegionSpace::kRegionSize) {
2015 // Free the large alloc.
2016 region_space_->FreeLarge(to_ref, bytes_allocated);
2017 } else {
2018 // Record the lost copy for later reuse.
2019 heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated);
2020 to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated);
2021 to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1);
2022 MutexLock mu(Thread::Current(), skipped_blocks_lock_);
2023 skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
2024 reinterpret_cast<uint8_t*>(to_ref)));
2025 }
2026 } else {
2027 DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
2028 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
2029 // Free the non-moving-space chunk.
2030 accounting::ContinuousSpaceBitmap* mark_bitmap =
2031 heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
2032 CHECK(mark_bitmap != nullptr);
2033 CHECK(mark_bitmap->Clear(to_ref));
2034 heap_->non_moving_space_->Free(Thread::Current(), to_ref);
2035 }
2036
2037 // Get the winner's forward ptr.
2038 mirror::Object* lost_fwd_ptr = to_ref;
2039 to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress());
2040 CHECK(to_ref != nullptr);
2041 CHECK_NE(to_ref, lost_fwd_ptr);
2042 CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref));
2043 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
2044 return to_ref;
2045 }
2046
Hiroshi Yamauchi60f63f52015-04-23 16:12:40 -07002047 // Set the gray ptr.
2048 if (kUseBakerReadBarrier) {
2049 to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr());
2050 }
2051
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002052 LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));
2053
2054 // Try to atomically write the fwd ptr.
2055 bool success = from_ref->CasLockWordWeakSequentiallyConsistent(old_lock_word, new_lock_word);
2056 if (LIKELY(success)) {
2057 // The CAS succeeded.
2058 objects_moved_.FetchAndAddSequentiallyConsistent(1);
2059 bytes_moved_.FetchAndAddSequentiallyConsistent(region_space_alloc_size);
2060 if (LIKELY(!fall_back_to_non_moving)) {
2061 DCHECK(region_space_->IsInToSpace(to_ref));
2062 } else {
2063 DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
2064 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
2065 }
2066 if (kUseBakerReadBarrier) {
2067 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
2068 }
2069 DCHECK(GetFwdPtr(from_ref) == to_ref);
2070 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07002071 PushOntoMarkStack(to_ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002072 return to_ref;
2073 } else {
2074 // The CAS failed. It may have lost the race or may have failed
2075 // due to monitor/hashcode ops. Either way, retry.
2076 }
2077 }
2078}
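// Race resolution summary for Copy(): several threads may copy the same from-space
// object concurrently, but exactly one wins the lock word CAS that installs
// LockWord::FromForwardingAddress(to_ref). A loser turns its copy into a dummy object,
// recycles its block (skipped_blocks_map_) or frees it, and adopts the winner's to_ref
// read from the forwarding address.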
2079
2080mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
2081 DCHECK(from_ref != nullptr);
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08002082 space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
2083 if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002084 // It's already marked.
2085 return from_ref;
2086 }
2087 mirror::Object* to_ref;
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08002088 if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002089 to_ref = GetFwdPtr(from_ref);
2090 DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) ||
2091 heap_->non_moving_space_->HasAddress(to_ref))
2092 << "from_ref=" << from_ref << " to_ref=" << to_ref;
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08002093 } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002094 if (region_space_bitmap_->Test(from_ref)) {
2095 to_ref = from_ref;
2096 } else {
2097 to_ref = nullptr;
2098 }
2099 } else {
2100 // from_ref is in a non-moving space.
Mathieu Chartier763a31e2015-11-16 16:05:55 -08002101 if (immune_spaces_.ContainsObject(from_ref)) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002102 // An immune object is alive.
2103 to_ref = from_ref;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002104 } else {
2105 // Non-immune non-moving space. Use the mark bitmap.
2106 accounting::ContinuousSpaceBitmap* mark_bitmap =
2107 heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
2108 accounting::LargeObjectBitmap* los_bitmap =
2109 heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
2110 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
2111 bool is_los = mark_bitmap == nullptr;
2112 if (!is_los && mark_bitmap->Test(from_ref)) {
2113 // Already marked.
2114 to_ref = from_ref;
2115 } else if (is_los && los_bitmap->Test(from_ref)) {
2116 // Already marked in LOS.
2117 to_ref = from_ref;
2118 } else {
2119 // Not marked.
2120 if (IsOnAllocStack(from_ref)) {
2121 // If on the allocation stack, it's considered marked.
2122 to_ref = from_ref;
2123 } else {
2124 // Not marked.
2125 to_ref = nullptr;
2126 }
2127 }
2128 }
2129 }
2130 return to_ref;
2131}
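// IsMarked() decision summary: to-space refs are marked by definition; from-space refs
// are marked iff a forwarding pointer exists (the to-space copy is returned); unevac
// from-space refs are marked iff set in region_space_bitmap_; non-moving refs are alive
// if immune, otherwise the space/LOS mark bitmaps and the allocation stack decide.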
2132
2133bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
2134 QuasiAtomic::ThreadFenceAcquire();
2135 accounting::ObjectStack* alloc_stack = GetAllocationStack();
Mathieu Chartiercb535da2015-01-23 13:50:03 -08002136 return alloc_stack->Contains(ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002137}
2138
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07002139mirror::Object* ConcurrentCopying::MarkNonMoving(mirror::Object* ref) {
2140 // ref is in a non-moving space (from_ref == to_ref).
2141 DCHECK(!region_space_->HasAddress(ref)) << ref;
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002142 DCHECK(!immune_spaces_.ContainsObject(ref));
2143 // Use the mark bitmap.
2144 accounting::ContinuousSpaceBitmap* mark_bitmap =
2145 heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
2146 accounting::LargeObjectBitmap* los_bitmap =
2147 heap_mark_bitmap_->GetLargeObjectBitmap(ref);
2148 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
2149 bool is_los = mark_bitmap == nullptr;
2150 if (!is_los && mark_bitmap->Test(ref)) {
2151 // Already marked.
2152 if (kUseBakerReadBarrier) {
2153 DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
2154 ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002155 }
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002156 } else if (is_los && los_bitmap->Test(ref)) {
2157 // Already marked in LOS.
2158 if (kUseBakerReadBarrier) {
2159 DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
2160 ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
2161 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002162 } else {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002163 // Not marked.
2164 if (IsOnAllocStack(ref)) {
2165 // If it's on the allocation stack, it's considered marked. Keep it white.
2166 // Objects on the allocation stack need not be marked.
2167 if (!is_los) {
2168 DCHECK(!mark_bitmap->Test(ref));
2169 } else {
2170 DCHECK(!los_bitmap->Test(ref));
Nicolas Geoffrayddeb1722016-06-28 08:25:59 +00002171 }
Nicolas Geoffrayddeb1722016-06-28 08:25:59 +00002172 if (kUseBakerReadBarrier) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002173 DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr());
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002174 }
2175 } else {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002176 // For the baker-style RB, we need to handle 'false-gray' cases. See the
2177 // kRegionTypeUnevacFromSpace-case comment in Mark().
2178 if (kUseBakerReadBarrier) {
2179 // Test the bitmap first to reduce the chance of false gray cases.
2180 if ((!is_los && mark_bitmap->Test(ref)) ||
2181 (is_los && los_bitmap->Test(ref))) {
2182 return ref;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002183 }
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002184 }
2185 // Neither marked nor on the allocation stack. Try to mark it.
2186 // This may or may not succeed, which is ok.
2187 bool cas_success = false;
2188 if (kUseBakerReadBarrier) {
2189 cas_success = ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(),
2190 ReadBarrier::GrayPtr());
2191 }
      if (!is_los && mark_bitmap->AtomicTestAndSet(ref)) {
        // Already marked.
        if (kUseBakerReadBarrier && cas_success &&
            ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
          PushOntoFalseGrayStack(ref);
        }
      } else if (is_los && los_bitmap->AtomicTestAndSet(ref)) {
        // Already marked in LOS.
        if (kUseBakerReadBarrier && cas_success &&
            ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
          PushOntoFalseGrayStack(ref);
        }
      } else {
        // Newly marked.
        if (kUseBakerReadBarrier) {
          DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
        }
        PushOntoMarkStack(ref);
      }
    }
  }
  return ref;
}

void ConcurrentCopying::FinishPhase() {
  Thread* const self = Thread::Current();
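  // All pooled mark stacks should have been returned to the pool by now; verify that none are
  // still outstanding.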
  {
    MutexLock mu(self, mark_stack_lock_);
    CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
  }
  region_space_ = nullptr;
  {
    MutexLock mu(self, skipped_blocks_lock_);
    skipped_blocks_map_.clear();
  }
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    {
      WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
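      // Clear the mark bitmaps so that the marks from this cycle do not carry over into the
      // next one.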
      heap_->ClearMarkedObjects();
    }
    if (kUseBakerReadBarrier && kFilterModUnionCards) {
      TimingLogger::ScopedTiming split("FilterModUnionCards", GetTimings());
      ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
      gc::Heap* const heap = Runtime::Current()->GetHeap();
      for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
        DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
        accounting::ModUnionTable* table = heap->FindModUnionTableFromSpace(space);
        // Filter out cards that don't need to be set.
        if (table != nullptr) {
          table->FilterCards();
        }
      }
    }
  }
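  // Fold this cycle's read barrier slow path measurements into the cumulative counters
  // reported by DumpPerformanceInfo().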
  if (measure_read_barrier_slow_path_) {
    MutexLock mu(self, rb_slow_path_histogram_lock_);
    rb_slow_path_time_histogram_.AdjustAndAddValue(rb_slow_path_ns_.LoadRelaxed());
    rb_slow_path_count_total_ += rb_slow_path_count_.LoadRelaxed();
    rb_slow_path_count_gc_total_ += rb_slow_path_count_gc_.LoadRelaxed();
  }
}

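// Used by the reference processor: returns whether the referent is marked and, if the object
// has been moved, forwards the field in place to the to-space address.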
bool ConcurrentCopying::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) {
  mirror::Object* from_ref = field->AsMirrorPtr();
  mirror::Object* to_ref = IsMarked(from_ref);
  if (to_ref == nullptr) {
    return false;
  }
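  // from_ref has been forwarded; update the field to point to the to-space object. The fences
  // conservatively order the field update with respect to the surrounding loads and stores.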
  if (from_ref != to_ref) {
    QuasiAtomic::ThreadFenceRelease();
    field->Assign(to_ref);
    QuasiAtomic::ThreadFenceSequentiallyConsistent();
  }
  return true;
}

mirror::Object* ConcurrentCopying::MarkObject(mirror::Object* from_ref) {
  return Mark(from_ref);
}

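// Defers the processing of a java.lang.ref.Reference encountered during marking; the
// reference processor enqueues it if its referent is not yet marked.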
void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}

void ConcurrentCopying::ProcessReferences(Thread* self) {
  TimingLogger::ScopedTiming split("ProcessReferences", GetTimings());
  // We don't really need to lock the heap bitmap lock as we use CAS to mark in bitmaps.
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}

void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  region_space_->RevokeAllThreadLocalBuffers();
}

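// Read barrier slow path variant that records measurements: slow path hits are counted
// separately for mutator threads and the GC thread, and the time spent in Mark() is
// accumulated when timing is enabled.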
mirror::Object* ConcurrentCopying::MarkFromReadBarrierWithMeasurements(mirror::Object* from_ref) {
  if (Thread::Current() != thread_running_gc_) {
    rb_slow_path_count_.FetchAndAddRelaxed(1u);
  } else {
    rb_slow_path_count_gc_.FetchAndAddRelaxed(1u);
  }
  ScopedTrace tr(__FUNCTION__);
  const uint64_t start_time = measure_read_barrier_slow_path_ ? NanoTime() : 0u;
  mirror::Object* ret = Mark(from_ref);
  if (measure_read_barrier_slow_path_) {
    rb_slow_path_ns_.FetchAndAddRelaxed(NanoTime() - start_time);
  }
  return ret;
}

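// Appends the read barrier slow path statistics (timing histogram and hit counts) to the
// standard garbage collector performance dump.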
void ConcurrentCopying::DumpPerformanceInfo(std::ostream& os) {
  GarbageCollector::DumpPerformanceInfo(os);
  MutexLock mu(Thread::Current(), rb_slow_path_histogram_lock_);
  if (rb_slow_path_time_histogram_.SampleSize() > 0) {
    Histogram<uint64_t>::CumulativeData cumulative_data;
    rb_slow_path_time_histogram_.CreateHistogram(&cumulative_data);
    rb_slow_path_time_histogram_.PrintConfidenceIntervals(os, 0.99, cumulative_data);
  }
  if (rb_slow_path_count_total_ > 0) {
    os << "Slow path count " << rb_slow_path_count_total_ << "\n";
  }
  if (rb_slow_path_count_gc_total_ > 0) {
    os << "GC slow path count " << rb_slow_path_count_gc_total_ << "\n";
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art