/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "concurrent_copying.h"

#include "art_field-inl.h"
#include "base/histogram-inl.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "debugger.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
#include "image-inl.h"
#include "intern_table.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"

namespace art {
namespace gc {
namespace collector {

static constexpr size_t kDefaultGcMarkStackSize = 2 * MB;
// If kGrayDirtyImmuneObjects is true then we gray dirty objects in the GC pause to prevent dirty
// pages.
static constexpr bool kGrayDirtyImmuneObjects = true;
// If kFilterModUnionCards is true then we attempt to filter out cards that don't need to be dirty
// in the mod union table. Disabled since it does not seem to help the pause much.
static constexpr bool kFilterModUnionCards = kIsDebugBuild;
// If kDisallowReadBarrierDuringScan is true then the GC aborts if any read barriers occur during
// ConcurrentCopying::Scan. May be used to diagnose possibly unnecessary read barriers.
// Only enabled for kIsDebugBuild to avoid the performance hit.
static constexpr bool kDisallowReadBarrierDuringScan = kIsDebugBuild;

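// A note on the constexpr flags above: guarding work with a constexpr bool lets the compiler
// fold the whole block away when the flag is false, so debug-only verification costs nothing in
// release builds. A minimal illustrative sketch of the idiom (hypothetical names, not part of
// this collector):
static constexpr bool kSketchExpensiveChecks = kIsDebugBuild;
static void SketchCheckNonNull(const void* ptr) {
  if (kSketchExpensiveChecks) {  // Dead-code-eliminated when the flag is false.
    CHECK(ptr != nullptr);
  }
}
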
ConcurrentCopying::ConcurrentCopying(Heap* heap,
                                     const std::string& name_prefix,
                                     bool measure_read_barrier_slow_path)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       "concurrent copying + mark sweep"),
      region_space_(nullptr), gc_barrier_(new Barrier(0)),
      gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack",
                                                     kDefaultGcMarkStackSize,
                                                     kDefaultGcMarkStackSize)),
      mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
      thread_running_gc_(nullptr),
      is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
      region_space_bitmap_(nullptr),
      heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0), mark_stack_mode_(kMarkStackModeOff),
      weak_ref_access_enabled_(true),
      skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
      measure_read_barrier_slow_path_(measure_read_barrier_slow_path),
      rb_slow_path_ns_(0),
      rb_slow_path_count_(0),
      rb_slow_path_count_gc_(0),
      rb_slow_path_histogram_lock_("Read barrier histogram lock"),
      rb_slow_path_time_histogram_("Mutator time in read barrier slow path", 500, 32),
      rb_slow_path_count_total_(0),
      rb_slow_path_count_gc_total_(0),
      rb_table_(heap_->GetReadBarrierTable()),
      force_evacuate_all_(false),
      immune_gray_stack_lock_("concurrent copying immune gray stack lock",
                              kMarkSweepMarkStackLock) {
  static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
                "The region space size and the read barrier table region size must match");
  Thread* self = Thread::Current();
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Cache this so that we won't have to lock heap_bitmap_lock_ in Mark(), which could cause a
    // nested lock on heap_bitmap_lock_ (when a read barrier triggers marking during GC) or a lock
    // order violation (class_linker_lock_ and heap_bitmap_lock_).
    heap_mark_bitmap_ = heap->GetMarkBitmap();
  }
  {
    MutexLock mu(self, mark_stack_lock_);
    for (size_t i = 0; i < kMarkStackPoolSize; ++i) {
      accounting::AtomicStack<mirror::Object>* mark_stack =
          accounting::AtomicStack<mirror::Object>::Create(
              "thread local mark stack", kMarkStackSize, kMarkStackSize);
      pooled_mark_stacks_.push_back(mark_stack);
    }
  }
}

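// The pool built in the constructor above serves PushOntoMarkStack(): a thread whose local mark
// stack is full takes a pre-allocated replacement under mark_stack_lock_ instead of allocating.
// A minimal sketch of the acquire side (hypothetical helper; the real pool holds
// accounting::AtomicStack instances and the caller creates a new stack when the pool is empty):
template <typename StackType>
static StackType* SketchAcquirePooledStack(std::vector<StackType*>* pool) {
  // The caller must hold the pool lock, as PushOntoMarkStack() holds mark_stack_lock_.
  if (!pool->empty()) {
    StackType* stack = pool->back();  // Reuse a pooled stack: no allocation here.
    pool->pop_back();
    return stack;
  }
  return nullptr;  // None pooled; the caller allocates a fresh stack.
}
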
void ConcurrentCopying::MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) {
  // Used for preserving soft references. It should be OK to not have a CAS here since there
  // should be no other threads which can trigger read barriers on the same referent during
  // reference processing.
  from_ref->Assign(Mark(from_ref->AsMirrorPtr()));
  DCHECK(!from_ref->IsNull());
}

ConcurrentCopying::~ConcurrentCopying() {
  STLDeleteElements(&pooled_mark_stacks_);
}

void ConcurrentCopying::RunPhases() {
  CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier);
  CHECK(!is_active_);
  is_active_ = true;
  Thread* self = Thread::Current();
  thread_running_gc_ = self;
  Locks::mutator_lock_->AssertNotHeld(self);
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    InitializePhase();
  }
  FlipThreadRoots();
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    MarkingPhase();
  }
  // Verify no from-space refs. This causes a pause.
  if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) {
    TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
    ScopedPause pause(this);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "Verifying no from-space refs";
    }
    VerifyNoFromSpaceReferences();
    if (kVerboseMode) {
      LOG(INFO) << "Done verifying no from-space refs";
    }
    CheckEmptyMarkStack();
  }
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  FinishPhase();
  CHECK(is_active_);
  is_active_ = false;
  thread_running_gc_ = nullptr;
}

void ConcurrentCopying::BindBitmaps() {
  Thread* self = Thread::Current();
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      CHECK(space->IsZygoteSpace() || space->IsImageSpace());
      immune_spaces_.AddSpace(space);
    } else if (space == region_space_) {
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create("cc region space bitmap",
                                                    space->Begin(), space->Capacity());
      region_space_bitmap_ = bitmap;
    }
  }
}

void ConcurrentCopying::InitializePhase() {
  TimingLogger::ScopedTiming split("InitializePhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC InitializePhase";
    LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-"
              << reinterpret_cast<void*>(region_space_->Limit());
  }
  CheckEmptyMarkStack();
  if (kIsDebugBuild) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    CHECK(false_gray_stack_.empty());
  }

  mark_from_read_barrier_measurements_ = measure_read_barrier_slow_path_;
  if (measure_read_barrier_slow_path_) {
    rb_slow_path_ns_.StoreRelaxed(0);
    rb_slow_path_count_.StoreRelaxed(0);
    rb_slow_path_count_gc_.StoreRelaxed(0);
  }

  immune_spaces_.Reset();
  bytes_moved_.StoreRelaxed(0);
  objects_moved_.StoreRelaxed(0);
  if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
      GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
      GetCurrentIteration()->GetClearSoftReferences()) {
    force_evacuate_all_ = true;
  } else {
    force_evacuate_all_ = false;
  }
  if (kUseBakerReadBarrier) {
    updated_all_immune_objects_.StoreRelaxed(false);
    // GC may gray immune objects in the thread flip.
    gc_grays_immune_objects_ = true;
    if (kIsDebugBuild) {
      MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
      DCHECK(immune_gray_stack_.empty());
    }
  }
  BindBitmaps();
  if (kVerboseMode) {
    LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_;
    LOG(INFO) << "Largest immune region: " << immune_spaces_.GetLargestImmuneRegion().Begin()
              << "-" << immune_spaces_.GetLargestImmuneRegion().End();
    for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
      LOG(INFO) << "Immune space: " << *space;
    }
    LOG(INFO) << "GC end of InitializePhase";
  }
}

// Used to switch the thread roots of a thread from from-space refs to to-space refs.
class ConcurrentCopying::ThreadFlipVisitor : public Closure, public RootVisitor {
 public:
  ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
      : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
  }

  virtual void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->SetIsGcMarking(true);
    if (use_tlab_ && thread->HasTlab()) {
      if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
        // This must come before the revoke.
        size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
        reinterpret_cast<Atomic<size_t>*>(&concurrent_copying_->from_space_num_objects_at_first_pause_)->
            FetchAndAddSequentiallyConsistent(thread_local_objects);
      } else {
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
      }
    }
    if (kUseThreadLocalAllocationStack) {
      thread->RevokeThreadLocalAllocationStack();
    }
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // We can use the non-CAS VisitRoots functions below because we update thread-local GC roots
    // only.
    thread->VisitRoots(this);
    concurrent_copying_->GetBarrier().Pass(self);
  }

  void VisitRoots(mirror::Object*** roots,
                  size_t count,
                  const RootInfo& info ATTRIBUTE_UNUSED)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    for (size_t i = 0; i < count; ++i) {
      mirror::Object** root = roots[i];
      mirror::Object* ref = *root;
      if (ref != nullptr) {
        mirror::Object* to_ref = concurrent_copying_->Mark(ref);
        if (to_ref != ref) {
          *root = to_ref;
        }
      }
    }
  }

  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                  size_t count,
                  const RootInfo& info ATTRIBUTE_UNUSED)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    for (size_t i = 0; i < count; ++i) {
      mirror::CompressedReference<mirror::Object>* const root = roots[i];
      if (!root->IsNull()) {
        mirror::Object* ref = root->AsMirrorPtr();
        mirror::Object* to_ref = concurrent_copying_->Mark(ref);
        if (to_ref != ref) {
          root->Assign(to_ref);
        }
      }
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool use_tlab_;
};

// Called back from Runtime::FlipThreadRoots() during a pause.
class ConcurrentCopying::FlipCallback : public Closure {
 public:
  explicit FlipCallback(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE REQUIRES(Locks::mutator_lock_) {
    ConcurrentCopying* cc = concurrent_copying_;
    TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self);
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
    cc->SwapStacks();
    if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
      cc->RecordLiveStackFreezeSize(self);
      cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated();
      cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
    }
    cc->is_marking_ = true;
    cc->mark_stack_mode_.StoreRelaxed(ConcurrentCopying::kMarkStackModeThreadLocal);
    if (kIsDebugBuild) {
      cc->region_space_->AssertAllRegionLiveBytesZeroOrCleared();
    }
    if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
      CHECK(Runtime::Current()->IsAotCompiler());
      TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
      Runtime::Current()->VisitTransactionRoots(cc);
    }
    if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
      cc->GrayAllDirtyImmuneObjects();
      if (kIsDebugBuild) {
        // Check that all non-gray immune objects only reference immune objects.
        cc->VerifyGrayImmuneObjects();
      }
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

class ConcurrentCopying::VerifyGrayImmuneObjectsVisitor {
 public:
  explicit VerifyGrayImmuneObjectsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
      const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_)
      SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
    CheckReference(obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset),
                   obj, offset);
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    CheckReference(ref->GetReferent<kWithoutReadBarrier>(),
                   ref,
                   mirror::Reference::ReferentOffset());
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      ALWAYS_INLINE
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      ALWAYS_INLINE
      SHARED_REQUIRES(Locks::mutator_lock_) {
    CheckReference(root->AsMirrorPtr(), nullptr, MemberOffset(0));
  }

 private:
  ConcurrentCopying* const collector_;

  void CheckReference(mirror::Object* ref, mirror::Object* holder, MemberOffset offset) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (ref != nullptr) {
      CHECK(collector_->immune_spaces_.ContainsObject(ref))
          << "Non-gray object references non-immune object " << ref << " " << PrettyTypeOf(ref)
          << " in holder " << holder << " " << PrettyTypeOf(holder) << " offset="
          << offset.Uint32Value();
    }
  }
};

void ConcurrentCopying::VerifyGrayImmuneObjects() {
  TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
  for (auto& space : immune_spaces_.GetSpaces()) {
    DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
    accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
    VerifyGrayImmuneObjectsVisitor visitor(this);
    live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                  reinterpret_cast<uintptr_t>(space->Limit()),
                                  [&visitor](mirror::Object* obj)
        SHARED_REQUIRES(Locks::mutator_lock_) {
      // If an object is not gray, it should only have references to things in the immune spaces.
      if (obj->GetReadBarrierPointer() != ReadBarrier::GrayPtr()) {
        obj->VisitReferences</*kVisitNativeRoots*/true,
                             kDefaultVerifyFlags,
                             kWithoutReadBarrier>(visitor, visitor);
      }
    });
  }
}

// Switch thread roots from from-space refs to to-space refs. Forward/mark the thread roots.
void ConcurrentCopying::FlipThreadRoots() {
  TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
  }
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  gc_barrier_->Init(self, 0);
  ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
  FlipCallback flip_callback(this);
  heap_->ThreadFlipBegin(self);  // Sync with JNI critical calls.
  size_t barrier_count = Runtime::Current()->FlipThreadRoots(
      &thread_flip_visitor, &flip_callback, this);
  heap_->ThreadFlipEnd(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  is_asserting_to_space_invariant_ = true;
  QuasiAtomic::ThreadFenceForConstructor();
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
    LOG(INFO) << "GC end of FlipThreadRoots";
  }
}

class ConcurrentCopying::GrayImmuneObjectVisitor {
 public:
  explicit GrayImmuneObjectVisitor() {}

  ALWAYS_INLINE void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) {
    if (kUseBakerReadBarrier) {
      if (kIsDebugBuild) {
        Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
      }
      obj->SetReadBarrierPointer(ReadBarrier::GrayPtr());
    }
  }

  static void Callback(mirror::Object* obj, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) {
    reinterpret_cast<GrayImmuneObjectVisitor*>(arg)->operator()(obj);
  }
};

void ConcurrentCopying::GrayAllDirtyImmuneObjects() {
  TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
  gc::Heap* const heap = Runtime::Current()->GetHeap();
  accounting::CardTable* const card_table = heap->GetCardTable();
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
    DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
    GrayImmuneObjectVisitor visitor;
    accounting::ModUnionTable* table = heap->FindModUnionTableFromSpace(space);
    // Mark all the objects on dirty cards since these may point to objects in other spaces.
    // Once these are marked, the GC will eventually clear them later.
    // The table is non-null for boot image and zygote spaces. It is only null for application
    // image spaces.
    if (table != nullptr) {
      // TODO: Add preclean outside the pause.
      table->ClearCards();
      table->VisitObjects(GrayImmuneObjectVisitor::Callback, &visitor);
    } else {
      // TODO: Consider having a mark bitmap for app image spaces and avoid scanning during the
      // pause because app image spaces are all dirty pages anyway.
      card_table->Scan<false>(space->GetMarkBitmap(), space->Begin(), space->End(), visitor);
    }
  }
  // Since all of the objects that may point to other spaces are marked, we can avoid all the read
  // barriers in the immune spaces.
  updated_all_immune_objects_.StoreRelaxed(true);
}

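// For reference, the card table scan in the else-branch above works on bytes: each card byte
// covers a fixed-size window of the heap, and only objects on dirty cards are visited (grayed)
// during the pause. A minimal sketch with hypothetical constants (ART's real values live in
// gc/accounting/card_table.h):
static constexpr size_t kSketchCardSize = 128;     // Assumed bytes of heap covered per card.
static constexpr uint8_t kSketchCardDirty = 0x70;  // Assumed marker value for a dirty card.
template <typename Visitor>
static void SketchScanDirtyCards(const uint8_t* cards, size_t num_cards, uint8_t* heap_begin,
                                 const Visitor& visit_window) {
  for (size_t i = 0; i < num_cards; ++i) {
    if (cards[i] == kSketchCardDirty) {
      // Visit every object in the heap window covered by this dirty card.
      visit_window(heap_begin + i * kSketchCardSize, kSketchCardSize);
    }
  }
}
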
void ConcurrentCopying::SwapStacks() {
  heap_->SwapStacks();
}

void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
}

class EmptyCheckpoint : public Closure {
 public:
  explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Used to visit objects in the immune spaces.
inline void ConcurrentCopying::ScanImmuneObject(mirror::Object* obj) {
  DCHECK(obj != nullptr);
  DCHECK(immune_spaces_.ContainsObject(obj));
  // Update the fields without graying it or pushing it onto the mark stack.
  Scan(obj);
}

class ConcurrentCopying::ImmuneSpaceScanObjVisitor {
 public:
  explicit ImmuneSpaceScanObjVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}

  ALWAYS_INLINE void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) {
    if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
      if (obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
        collector_->ScanImmuneObject(obj);
        // Done scanning the object, go back to white.
        bool success = obj->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                        ReadBarrier::WhitePtr());
        CHECK(success);
      }
    } else {
      collector_->ScanImmuneObject(obj);
    }
  }

  static void Callback(mirror::Object* obj, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) {
    reinterpret_cast<ImmuneSpaceScanObjVisitor*>(arg)->operator()(obj);
  }

 private:
  ConcurrentCopying* const collector_;
};

// Concurrently mark roots that are guarded by read barriers and process the mark stack.
void ConcurrentCopying::MarkingPhase() {
  TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC MarkingPhase";
  }
  CHECK(weak_ref_access_enabled_);

  // Scan immune spaces.
  // Update all the fields in the immune spaces first without graying the objects so that we
  // minimize dirty pages in the immune spaces. Note mutators can concurrently access and gray some
  // of the objects.
  if (kUseBakerReadBarrier) {
    gc_grays_immune_objects_ = false;
  }
  {
    TimingLogger::ScopedTiming split2("ScanImmuneSpaces", GetTimings());
    for (auto& space : immune_spaces_.GetSpaces()) {
      DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
      accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      ImmuneSpaceScanObjVisitor visitor(this);
      if (table != nullptr) {
        table->VisitObjects(ImmuneSpaceScanObjVisitor::Callback, &visitor);
      } else {
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->Limit()),
                                      visitor);
      }
    }
  }
  if (kUseBakerReadBarrier) {
    // This release fence makes the field updates in the above loop visible before allowing
    // mutators to get access to immune objects without graying them first.
    updated_all_immune_objects_.StoreRelease(true);
    // Now whiten immune objects concurrently accessed and grayed by mutators. We can't do this in
    // the above loop because we would incorrectly disable the read barrier by whitening an object
    // which may point to an unscanned, white object, breaking the to-space invariant.
    //
    // Make sure no mutators are in the middle of marking an immune object before whitening immune
    // objects.
    IssueEmptyCheckpoint();
    MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
    if (kVerboseMode) {
      LOG(INFO) << "immune gray stack size=" << immune_gray_stack_.size();
    }
    for (mirror::Object* obj : immune_gray_stack_) {
      DCHECK(obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      bool success = obj->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                      ReadBarrier::WhitePtr());
      DCHECK(success);
    }
    immune_gray_stack_.clear();
  }

  {
    TimingLogger::ScopedTiming split2("VisitConcurrentRoots", GetTimings());
    Runtime::Current()->VisitConcurrentRoots(this, kVisitRootFlagAllRoots);
  }
  {
    // TODO: don't visit the transaction roots if it's not active.
    TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
    Runtime::Current()->VisitNonThreadRoots(this);
  }

  Thread* self = Thread::Current();
  {
    TimingLogger::ScopedTiming split7("ProcessMarkStack", GetTimings());
    // We transition through three mark stack modes (thread-local, shared, GC-exclusive). The
    // primary reasons are that we need to use a checkpoint to process thread-local mark stacks,
    // but after we disable weak ref accesses, we can't use a checkpoint due to a deadlock issue
    // (running threads could be blocked at WaitHoldingLocks), and that once we reach the point
    // where we process weak references, we can avoid using a lock when accessing the GC mark
    // stack, which makes mark stack processing more efficient.

    // Process the mark stack once in the thread-local stack mode. This marks most of the live
    // objects, aside from weak ref accesses with read barriers (Reference::GetReferent() and
    // system weaks) that may happen concurrently while we process the mark stack and newly
    // mark/gray objects and push refs onto the mark stack.
    ProcessMarkStack();
    // Switch to the shared mark stack mode. That is, revoke and process thread-local mark stacks
    // for the last time before transitioning to the shared mark stack mode, which would process new
    // refs that may have been concurrently pushed onto the mark stack during the ProcessMarkStack()
    // call above. At the same time, disable weak ref accesses using a per-thread flag. It's
    // important to do these together in a single checkpoint so that we can ensure that mutators
    // won't newly gray objects and push new refs onto the mark stack due to weak ref accesses and
    // mutators safely transition to the shared mark stack mode (without leaving unprocessed refs on
    // the thread-local mark stacks), without a race. This is why we use a thread-local weak ref
    // access flag Thread::tls32_.weak_ref_access_enabled_ instead of the global ones.
    SwitchToSharedMarkStackMode();
    CHECK(!self->GetWeakRefAccessEnabled());
    // Now that weak ref accesses are disabled, once we exhaust the shared mark stack again here
    // (which may be non-empty if there were refs found on thread-local mark stacks during the above
    // SwitchToSharedMarkStackMode() call), we won't have new refs to process, that is, mutators
    // (via read barriers) have no way to produce any more refs to process. Marking converges once
    // before we process weak refs below.
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Switch to the GC-exclusive mark stack mode so that we can process the mark stack without a
    // lock from this point on.
    SwitchToGcExclusiveMarkStackMode();
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "ProcessReferences";
    }
    // Process weak references. This may produce new refs to process and have them processed via
    // ProcessMarkStack (in the GC-exclusive mark stack mode).
    ProcessReferences(self);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks";
    }
    SweepSystemWeaks(self);
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks done";
    }
    // Process the mark stack here one last time because the above SweepSystemWeaks() call may have
    // marked some objects (strings alive) as hash_set::Erase() can call the hash function for
    // arbitrary elements in the weak intern table in InternTable::Table::SweepWeaks().
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Re-enable weak ref accesses.
    ReenableWeakRefAccess(self);
    // Free data for class loaders that we unloaded.
    Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
    // Marking is done. Disable marking.
    DisableMarking();
    if (kUseBakerReadBarrier) {
      ProcessFalseGrayStack();
    }
    CheckEmptyMarkStack();
  }

  CHECK(weak_ref_access_enabled_);
  if (kVerboseMode) {
    LOG(INFO) << "GC end of MarkingPhase";
  }
}

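// A summary sketch of the mark stack mode progression that MarkingPhase() above drives; the
// transitions are one-way within a GC cycle:
//
//   kMarkStackModeThreadLocal   mutators push onto thread-local stacks, revoked via checkpoints
//             |
//             v  SwitchToSharedMarkStackMode(): final revocation + disable weak ref access
//   kMarkStackModeShared        everyone pushes onto gc_mark_stack_ under mark_stack_lock_
//             |
//             v  SwitchToGcExclusiveMarkStackMode(): marking has converged
//   kMarkStackModeGcExclusive   only thread_running_gc_ touches gc_mark_stack_, lock-free
//             |
//             v  DisableMarking()
//   kMarkStackModeOff           pushing is disallowed until the next cycle
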
void ConcurrentCopying::ReenableWeakRefAccess(Thread* self) {
  if (kVerboseMode) {
    LOG(INFO) << "ReenableWeakRefAccess";
  }
  weak_ref_access_enabled_.StoreRelaxed(true);  // This is for new threads.
  QuasiAtomic::ThreadFenceForConstructor();
  // Iterate all threads (don't need to or can't use a checkpoint) and re-enable weak ref access.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      thread->SetWeakRefAccessEnabled(true);
    }
  }
  // Unblock blocking threads.
  GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
  Runtime::Current()->BroadcastForNewSystemWeaks();
}

class ConcurrentCopying::DisableMarkingCheckpoint : public Closure {
 public:
  explicit DisableMarkingCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // Disable the thread-local is_gc_marking flag.
    // Note a thread that has just started right before this checkpoint may already have this flag
    // set to false, which is OK.
    thread->SetIsGcMarking(false);
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

void ConcurrentCopying::IssueDisableMarkingCheckpoint() {
  Thread* self = Thread::Current();
  DisableMarkingCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions are
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

void ConcurrentCopying::DisableMarking() {
  // Change the global is_marking flag to false. Do a fence before doing a checkpoint to update the
  // thread-local flags so that a new thread starting up will get the correct is_marking flag.
  is_marking_ = false;
  QuasiAtomic::ThreadFenceForConstructor();
  // Use a checkpoint to turn off the thread-local is_gc_marking flags and to ensure no threads are
  // still in the middle of a read barrier which may have a from-space ref cached in a local
  // variable.
  IssueDisableMarkingCheckpoint();
  if (kUseTableLookupReadBarrier) {
    heap_->rb_table_->ClearAll();
    DCHECK(heap_->rb_table_->IsAllCleared());
  }
  is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(1);
  mark_stack_mode_.StoreSequentiallyConsistent(kMarkStackModeOff);
}

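// The flag-publication pattern used by DisableMarking() above (plain global write, fence, then a
// checkpoint that rewrites each thread's cached flag) in condensed form. This is a sketch with
// hypothetical names; the loop stands in for the real per-thread checkpoint:
static void SketchPublishFlagDisabled(bool* global_flag,
                                      const std::vector<Atomic<bool>*>& thread_flags) {
  *global_flag = false;                      // Seen by threads that start up after this point.
  QuasiAtomic::ThreadFenceForConstructor();  // Order the global write before the per-thread ones.
  for (Atomic<bool>* flag : thread_flags) {
    // In the real code this store runs inside DisableMarkingCheckpoint::Run() at a suspend
    // point, which also guarantees no thread is mid-read-barrier with a stale from-space ref.
    flag->StoreRelaxed(false);
  }
}
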
void ConcurrentCopying::PushOntoFalseGrayStack(mirror::Object* ref) {
  CHECK(kUseBakerReadBarrier);
  DCHECK(ref != nullptr);
  MutexLock mu(Thread::Current(), mark_stack_lock_);
  false_gray_stack_.push_back(ref);
}

void ConcurrentCopying::ProcessFalseGrayStack() {
  CHECK(kUseBakerReadBarrier);
  // Change the objects on the false gray stack from gray to white.
  MutexLock mu(Thread::Current(), mark_stack_lock_);
  for (mirror::Object* obj : false_gray_stack_) {
    DCHECK(IsMarked(obj));
    // The object could already be white here if a thread got preempted after a success at the
    // AtomicSetReadBarrierPointer in Mark(), the GC marked through it and whitened it in the
    // meantime, and only then did the thread run to register it onto the false gray stack.
    if (obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
      bool success = obj->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                      ReadBarrier::WhitePtr());
      DCHECK(success);
    }
  }
  false_gray_stack_.clear();
}


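// The gray-to-white transitions above rely on a compare-and-swap so that racing whiteners are
// harmless: the CAS simply fails if the object is no longer gray. A minimal sketch with the read
// barrier state modeled as an Atomic<uint32_t> (hypothetical encoding; the real state lives in
// the object header behind AtomicSetReadBarrierPointer()):
static bool SketchWhitenIfGray(Atomic<uint32_t>* rb_state, uint32_t gray, uint32_t white) {
  // Succeeds only if the state is still `gray`; otherwise it leaves the state untouched.
  return rb_state->CompareExchangeStrongSequentiallyConsistent(gray, white);
}
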
void ConcurrentCopying::IssueEmptyCheckpoint() {
  Thread* self = Thread::Current();
  EmptyCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions are
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

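// IssueEmptyCheckpoint() above shows the checkpoint/barrier rendezvous shape shared by the other
// checkpoints in this file:
//
//   gc_barrier_->Init(self, 0);                       // reset the barrier count
//   size_t n = thread_list->RunCheckpoint(&closure);  // n threads will run the closure themselves
//   if (n == 0) return;                               // everything already ran on our behalf
//   Locks::mutator_lock_->SharedUnlock(self);         // don't block mutators while waiting
//   gc_barrier_->Increment(self, n);                  // wait for n GetBarrier().Pass(self) calls
//   Locks::mutator_lock_->SharedLock(self);
//
// Each running mutator executes the closure at a suspend point and then calls Pass(), which is
// what Increment() waits for.
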
void ConcurrentCopying::ExpandGcMarkStack() {
  DCHECK(gc_mark_stack_->IsFull());
  const size_t new_size = gc_mark_stack_->Capacity() * 2;
  std::vector<StackReference<mirror::Object>> temp(gc_mark_stack_->Begin(),
                                                   gc_mark_stack_->End());
  gc_mark_stack_->Resize(new_size);
  for (auto& ref : temp) {
    gc_mark_stack_->PushBack(ref.AsMirrorPtr());
  }
  DCHECK(!gc_mark_stack_->IsFull());
}

void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
  CHECK_EQ(is_mark_stack_push_disallowed_.LoadRelaxed(), 0)
      << " " << to_ref << " " << PrettyTypeOf(to_ref);
  Thread* self = Thread::Current();  // TODO: pass self as an argument from call sites?
  CHECK(thread_running_gc_ != nullptr);
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (LIKELY(mark_stack_mode == kMarkStackModeThreadLocal)) {
    if (LIKELY(self == thread_running_gc_)) {
      // If GC-running thread, use the GC mark stack instead of a thread-local mark stack.
      CHECK(self->GetThreadLocalMarkStack() == nullptr);
      if (UNLIKELY(gc_mark_stack_->IsFull())) {
        ExpandGcMarkStack();
      }
      gc_mark_stack_->PushBack(to_ref);
    } else {
      // Otherwise, use a thread-local mark stack.
      accounting::AtomicStack<mirror::Object>* tl_mark_stack = self->GetThreadLocalMarkStack();
      if (UNLIKELY(tl_mark_stack == nullptr || tl_mark_stack->IsFull())) {
        MutexLock mu(self, mark_stack_lock_);
        // Get a new thread-local mark stack.
        accounting::AtomicStack<mirror::Object>* new_tl_mark_stack;
        if (!pooled_mark_stacks_.empty()) {
          // Use a pooled mark stack.
          new_tl_mark_stack = pooled_mark_stacks_.back();
          pooled_mark_stacks_.pop_back();
        } else {
          // None pooled. Create a new one.
          new_tl_mark_stack =
              accounting::AtomicStack<mirror::Object>::Create(
                  "thread local mark stack", 4 * KB, 4 * KB);
        }
        DCHECK(new_tl_mark_stack != nullptr);
        DCHECK(new_tl_mark_stack->IsEmpty());
        new_tl_mark_stack->PushBack(to_ref);
        self->SetThreadLocalMarkStack(new_tl_mark_stack);
        if (tl_mark_stack != nullptr) {
          // Store the old full stack into a vector.
          revoked_mark_stacks_.push_back(tl_mark_stack);
        }
      } else {
        tl_mark_stack->PushBack(to_ref);
      }
    }
  } else if (mark_stack_mode == kMarkStackModeShared) {
    // Access the shared GC mark stack with a lock.
    MutexLock mu(self, mark_stack_lock_);
    if (UNLIKELY(gc_mark_stack_->IsFull())) {
      ExpandGcMarkStack();
    }
    gc_mark_stack_->PushBack(to_ref);
  } else {
    CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
             static_cast<uint32_t>(kMarkStackModeGcExclusive))
        << "ref=" << to_ref
        << " self->gc_marking=" << self->GetIsGcMarking()
        << " cc->is_marking=" << is_marking_;
    CHECK(self == thread_running_gc_)
        << "Only GC-running thread should access the mark stack "
        << "in the GC exclusive mark stack mode";
    // Access the GC mark stack without a lock.
    if (UNLIKELY(gc_mark_stack_->IsFull())) {
      ExpandGcMarkStack();
    }
    gc_mark_stack_->PushBack(to_ref);
  }
}

accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() {
  return heap_->allocation_stack_.get();
}

accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
  return heap_->live_stack_.get();
}

// The following visitors are used to verify that there are no references to the from-space left
// after marking.
class ConcurrentCopying::VerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
 public:
  explicit VerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    if (kUseBakerReadBarrier) {
      CHECK(ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr())
          << "Ref " << ref << " " << PrettyTypeOf(ref)
          << " has non-white rb_ptr " << ref->GetReadBarrierPointer();
    }
  }

  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    DCHECK(root != nullptr);
    operator()(root);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopying::VerifyNoFromSpaceRefsFieldVisitor {
 public:
  explicit VerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    VerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    this->operator()(ref, mirror::Reference::ReferentOffset(), false);
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    VerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(root->AsMirrorPtr());
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopying::VerifyNoFromSpaceRefsObjectVisitor {
 public:
  explicit VerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void *arg)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    VerifyNoFromSpaceRefsFieldVisitor visitor(collector);
    obj->VisitReferences(visitor, visitor);
    if (kUseBakerReadBarrier) {
      CHECK(obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr())
          << "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer();
    }
  }

 private:
  ConcurrentCopying* const collector_;
};

// Verify there are no from-space references left after the marking phase.
void ConcurrentCopying::VerifyNoFromSpaceReferences() {
  Thread* self = Thread::Current();
  DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
  // Verify all threads have their is_gc_marking flag set to false.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      CHECK(!thread->GetIsGcMarking());
    }
  }
  VerifyNoFromSpaceRefsObjectVisitor visitor(this);
  // Roots.
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    VerifyNoFromSpaceRefsVisitor ref_visitor(this);
    Runtime::Current()->VisitRoots(&ref_visitor);
  }
  // The to-space.
  region_space_->WalkToSpace(VerifyNoFromSpaceRefsObjectVisitor::ObjectCallback, this);
  // Non-moving spaces.
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->GetMarkBitmap()->Visit(visitor);
  }
  // The alloc stack.
  {
    VerifyNoFromSpaceRefsVisitor ref_visitor(this);
    for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
        it < end; ++it) {
      mirror::Object* const obj = it->AsMirrorPtr();
      if (obj != nullptr && obj->GetClass() != nullptr) {
        // TODO: need to call this only if obj is alive?
        ref_visitor(obj);
        visitor(obj);
      }
    }
  }
  // TODO: LOS. But only refs in LOS are classes.
}

1035// The following visitors are used to assert the to-space invariant.
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001036class ConcurrentCopying::AssertToSpaceInvariantRefsVisitor {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001037 public:
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001038 explicit AssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001039 : collector_(collector) {}
1040
1041 void operator()(mirror::Object* ref) const
Mathieu Chartier90443472015-07-16 20:32:27 -07001042 SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001043 if (ref == nullptr) {
1044 // OK.
1045 return;
1046 }
1047 collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
1048 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001049
1050 private:
Mathieu Chartier97509952015-07-13 14:35:43 -07001051 ConcurrentCopying* const collector_;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001052};
1053
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001054class ConcurrentCopying::AssertToSpaceInvariantFieldVisitor {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001055 public:
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001056 explicit AssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001057 : collector_(collector) {}
1058
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001059 void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
Mathieu Chartier90443472015-07-16 20:32:27 -07001060 SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001061 mirror::Object* ref =
1062 obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001063 AssertToSpaceInvariantRefsVisitor visitor(collector_);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001064 visitor(ref);
1065 }
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001066 void operator()(mirror::Class* klass, mirror::Reference* ref ATTRIBUTE_UNUSED) const
Mathieu Chartier90443472015-07-16 20:32:27 -07001067 SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001068 CHECK(klass->IsTypeOfReferenceClass());
1069 }
1070
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001071 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
1072 SHARED_REQUIRES(Locks::mutator_lock_) {
1073 if (!root->IsNull()) {
1074 VisitRoot(root);
1075 }
1076 }
1077
1078 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
1079 SHARED_REQUIRES(Locks::mutator_lock_) {
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001080 AssertToSpaceInvariantRefsVisitor visitor(collector_);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001081 visitor(root->AsMirrorPtr());
1082 }
1083
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001084 private:
Mathieu Chartier97509952015-07-13 14:35:43 -07001085 ConcurrentCopying* const collector_;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001086};

class ConcurrentCopying::AssertToSpaceInvariantObjectVisitor {
 public:
  explicit AssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void *arg)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
    AssertToSpaceInvariantFieldVisitor visitor(collector);
    obj->VisitReferences(visitor, visitor);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
 public:
  RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
                                       bool disable_weak_ref_access)
      : concurrent_copying_(concurrent_copying),
        disable_weak_ref_access_(disable_weak_ref_access) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // Revoke thread local mark stacks.
    accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
    if (tl_mark_stack != nullptr) {
      MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
      concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
      thread->SetThreadLocalMarkStack(nullptr);
    }
    // Disable weak ref access.
    if (disable_weak_ref_access_) {
      thread->SetWeakRefAccessEnabled(false);
    }
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool disable_weak_ref_access_;
};

void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access) {
  Thread* self = Thread::Current();
  RevokeThreadLocalMarkStackCheckpoint check_point(this, disable_weak_ref_access);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, all the checkpoint functions have already finished, so
  // there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}
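
// Note on the revoke protocol above (informal): RunCheckpoint() returns the number of threads
// that will run the closure asynchronously, and each of them calls GetBarrier().Pass() when it
// finishes, so Increment(self, barrier_count) blocks until every thread-local mark stack has been
// handed over. The mutator lock is released around the wait so the GC thread does not hold it
// while blocked on the barrier.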

void ConcurrentCopying::RevokeThreadLocalMarkStack(Thread* thread) {
  Thread* self = Thread::Current();
  CHECK_EQ(self, thread);
  accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
  if (tl_mark_stack != nullptr) {
    CHECK(is_marking_);
    MutexLock mu(self, mark_stack_lock_);
    revoked_mark_stacks_.push_back(tl_mark_stack);
    thread->SetThreadLocalMarkStack(nullptr);
  }
}

void ConcurrentCopying::ProcessMarkStack() {
  if (kVerboseMode) {
    LOG(INFO) << "ProcessMarkStack. ";
  }
  bool empty_prev = false;
  while (true) {
    bool empty = ProcessMarkStackOnce();
    if (empty_prev && empty) {
      // Saw empty mark stack for a second time, done.
      break;
    }
    empty_prev = empty;
  }
}

bool ConcurrentCopying::ProcessMarkStackOnce() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK(self == thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  size_t count = 0;
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (mark_stack_mode == kMarkStackModeThreadLocal) {
    // Process the thread-local mark stacks and the GC mark stack.
    count += ProcessThreadLocalMarkStacks(false);
    while (!gc_mark_stack_->IsEmpty()) {
      mirror::Object* to_ref = gc_mark_stack_->PopBack();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    gc_mark_stack_->Reset();
  } else if (mark_stack_mode == kMarkStackModeShared) {
    // Process the shared GC mark stack with a lock.
    {
      MutexLock mu(self, mark_stack_lock_);
      CHECK(revoked_mark_stacks_.empty());
    }
    while (true) {
      std::vector<mirror::Object*> refs;
      {
        // Copy refs with lock. Note the number of refs should be small.
        MutexLock mu(self, mark_stack_lock_);
        if (gc_mark_stack_->IsEmpty()) {
          break;
        }
        for (StackReference<mirror::Object>* p = gc_mark_stack_->Begin();
             p != gc_mark_stack_->End(); ++p) {
          refs.push_back(p->AsMirrorPtr());
        }
        gc_mark_stack_->Reset();
      }
      for (mirror::Object* ref : refs) {
        ProcessMarkStackRef(ref);
        ++count;
      }
    }
  } else {
    CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
             static_cast<uint32_t>(kMarkStackModeGcExclusive));
    {
      MutexLock mu(self, mark_stack_lock_);
      CHECK(revoked_mark_stacks_.empty());
    }
    // Process the GC mark stack in the exclusive mode. No need to take the lock.
    while (!gc_mark_stack_->IsEmpty()) {
      mirror::Object* to_ref = gc_mark_stack_->PopBack();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    gc_mark_stack_->Reset();
  }

  // Return true if the stack was empty.
  return count == 0;
}
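
// Informal summary of the three modes handled above: during concurrent marking, mutators push to
// thread-local stacks (kMarkStackModeThreadLocal); near the end of marking the GC switches to a
// single shared stack guarded by mark_stack_lock_ (kMarkStackModeShared); once mutators can no
// longer push, the GC drains the stack without locking (kMarkStackModeGcExclusive). See
// SwitchToSharedMarkStackMode() and SwitchToGcExclusiveMarkStackMode() below.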

size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access) {
  // Run a checkpoint to collect all thread local mark stacks and iterate over them all.
  RevokeThreadLocalMarkStacks(disable_weak_ref_access);
  size_t count = 0;
  std::vector<accounting::AtomicStack<mirror::Object>*> mark_stacks;
  {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    // Make a copy of the mark stack vector.
    mark_stacks = revoked_mark_stacks_;
    revoked_mark_stacks_.clear();
  }
  for (accounting::AtomicStack<mirror::Object>* mark_stack : mark_stacks) {
    for (StackReference<mirror::Object>* p = mark_stack->Begin(); p != mark_stack->End(); ++p) {
      mirror::Object* to_ref = p->AsMirrorPtr();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    {
      MutexLock mu(Thread::Current(), mark_stack_lock_);
      if (pooled_mark_stacks_.size() >= kMarkStackPoolSize) {
        // The pool has enough. Delete it.
        delete mark_stack;
      } else {
        // Otherwise, put it into the pool for later reuse.
        mark_stack->Reset();
        pooled_mark_stacks_.push_back(mark_stack);
      }
    }
  }
  return count;
}

inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
  DCHECK(!region_space_->IsInFromSpace(to_ref));
  if (kUseBakerReadBarrier) {
    DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
        << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
        << " is_marked=" << IsMarked(to_ref);
  }
  // Scan ref fields.
  Scan(to_ref);
  if (kUseBakerReadBarrier) {
    DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
        << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
        << " is_marked=" << IsMarked(to_ref);
  }
#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
  if (UNLIKELY((to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
                to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr &&
                !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())))) {
    // Leave this reference gray in the queue so that GetReferent() will trigger a read barrier. We
    // will change it to white later in ReferenceQueue::DequeuePendingReference().
    DCHECK(to_ref->AsReference()->GetPendingNext() != nullptr) << "Left unenqueued ref gray " << to_ref;
  } else {
    // We may occasionally leave a reference white in the queue if its referent happens to be
    // concurrently marked after the Scan() call above has enqueued the Reference, in which case the
    // above IsInToSpace() evaluates to true and we change the color from gray to white here in this
    // else block.
    if (kUseBakerReadBarrier) {
      bool success = to_ref->AtomicSetReadBarrierPointer</*kCasRelease*/true>(
          ReadBarrier::GrayPtr(),
          ReadBarrier::WhitePtr());
      DCHECK(success) << "Must succeed as we won the race.";
    }
  }
#else
  DCHECK(!kUseBakerReadBarrier);
#endif

  if (region_space_->IsInUnevacFromSpace(to_ref)) {
    // Add to the live bytes per unevacuated from space. Note this code is always run by the
    // GC-running thread (no synchronization required).
    DCHECK(region_space_bitmap_->Test(to_ref));
    // Disable the read barrier in SizeOf for performance, which is safe.
    size_t obj_size = to_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
    size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
    region_space_->AddLiveBytes(to_ref, alloc_size);
  }
  if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
    AssertToSpaceInvariantObjectVisitor visitor(this);
    visitor(to_ref);
  }
}
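
// Informal sketch of the Baker color protocol relied on above: a queued object is gray, so
// mutator reads of its fields are routed through the read barrier; once all of its reference
// fields have been scanned (and thus point to to-space), the GC CASes it gray -> white and
// mutators can use it without the slow path. java.lang.ref.Reference objects with unmarked
// referents are the exception: they stay gray until dequeued by the reference processor.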

void ConcurrentCopying::SwitchToSharedMarkStackMode() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK_EQ(self, thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
           static_cast<uint32_t>(kMarkStackModeThreadLocal));
  mark_stack_mode_.StoreRelaxed(kMarkStackModeShared);
  CHECK(weak_ref_access_enabled_.LoadRelaxed());
  weak_ref_access_enabled_.StoreRelaxed(false);
  QuasiAtomic::ThreadFenceForConstructor();
  // Process the thread-local mark stacks one last time after switching to the shared mark stack
  // mode and disabling weak ref accesses.
  ProcessThreadLocalMarkStacks(true);
  if (kVerboseMode) {
    LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access";
  }
}
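
// Note (informal): once weak ref access is disabled, mutator threads that try to read through
// weak references (e.g. via Reference.get()) block until the GC re-enables access, so the
// remaining marking work sees a consistent view of weak references.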

void ConcurrentCopying::SwitchToGcExclusiveMarkStackMode() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK_EQ(self, thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
           static_cast<uint32_t>(kMarkStackModeShared));
  mark_stack_mode_.StoreRelaxed(kMarkStackModeGcExclusive);
  QuasiAtomic::ThreadFenceForConstructor();
  if (kVerboseMode) {
    LOG(INFO) << "Switched to GC exclusive mark stack mode";
  }
}

void ConcurrentCopying::CheckEmptyMarkStack() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK_EQ(self, thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (mark_stack_mode == kMarkStackModeThreadLocal) {
    // Thread-local mark stack mode.
    RevokeThreadLocalMarkStacks(false);
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (!revoked_mark_stacks_.empty()) {
      for (accounting::AtomicStack<mirror::Object>* mark_stack : revoked_mark_stacks_) {
        while (!mark_stack->IsEmpty()) {
          mirror::Object* obj = mark_stack->PopBack();
          if (kUseBakerReadBarrier) {
            mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
            LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj) << " rb_ptr=" << rb_ptr
                      << " is_marked=" << IsMarked(obj);
          } else {
            LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj)
                      << " is_marked=" << IsMarked(obj);
          }
        }
      }
      LOG(FATAL) << "mark stack is not empty";
    }
  } else {
    // Shared, GC-exclusive, or off.
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    CHECK(gc_mark_stack_->IsEmpty());
    CHECK(revoked_mark_stacks_.empty());
  }
}

void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
  TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  Runtime::Current()->SweepSystemWeaks(this);
}

void ConcurrentCopying::Sweep(bool swap_bitmaps) {
  {
    TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    if (kEnableFromSpaceAccountingCheck) {
      CHECK_GE(live_stack_freeze_size_, live_stack->Size());
    }
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
  }
  CheckEmptyMarkStack();
  TimingLogger::ScopedTiming split("Sweep", GetTimings());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (space == region_space_ || immune_spaces_.ContainsSpace(space)) {
        continue;
      }
      TimingLogger::ScopedTiming split2(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
      RecordFree(alloc_space->Sweep(swap_bitmaps));
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) {
  TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
  RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
}
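
// Note (informal): only the non-moving mem-map spaces and the large object space are swept here.
// Immune spaces are skipped, and region-space memory is reclaimed separately by ClearFromSpace()
// in ReclaimPhase() below.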

void ConcurrentCopying::ReclaimPhase() {
  TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC ReclaimPhase";
  }
  Thread* self = Thread::Current();

  {
    // Double-check that the mark stack is empty.
    // Note: need to set this after VerifyNoFromSpaceRef().
    is_asserting_to_space_invariant_ = false;
    QuasiAtomic::ThreadFenceForConstructor();
    if (kVerboseMode) {
      LOG(INFO) << "Issue an empty check point. ";
    }
    IssueEmptyCheckpoint();
    // Disable the check.
    is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(0);
    if (kUseBakerReadBarrier) {
      updated_all_immune_objects_.StoreSequentiallyConsistent(false);
    }
    CheckEmptyMarkStack();
  }

  {
    // Record freed objects.
    TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
    // Don't include thread-locals that are in the to-space.
    uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
    uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
    uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
    uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
    uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
    uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
    if (kEnableFromSpaceAccountingCheck) {
      CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
      CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
    }
    CHECK_LE(to_objects, from_objects);
    CHECK_LE(to_bytes, from_bytes);
    int64_t freed_bytes = from_bytes - to_bytes;
    int64_t freed_objects = from_objects - to_objects;
    if (kVerboseMode) {
      LOG(INFO) << "RecordFree:"
                << " from_bytes=" << from_bytes << " from_objects=" << from_objects
                << " unevac_from_bytes=" << unevac_from_bytes
                << " unevac_from_objects=" << unevac_from_objects
                << " to_bytes=" << to_bytes << " to_objects=" << to_objects
                << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects
                << " from_space size=" << region_space_->FromSpaceSize()
                << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
                << " to_space size=" << region_space_->ToSpaceSize();
      LOG(INFO) << "(before) num_bytes_allocated="
                << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
    }
    RecordFree(ObjectBytePair(freed_objects, freed_bytes));
    if (kVerboseMode) {
      LOG(INFO) << "(after) num_bytes_allocated="
                << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
    }
  }

  {
    TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
    region_space_->ClearFromSpace();
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    Sweep(false);
    SwapBitmaps();
    heap_->UnBindBitmaps();

    // Delete the region bitmap.
    DCHECK(region_space_bitmap_ != nullptr);
    delete region_space_bitmap_;
    region_space_bitmap_ = nullptr;
  }

  CheckEmptyMarkStack();

  if (kVerboseMode) {
    LOG(INFO) << "GC end of ReclaimPhase";
  }
}
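
// Informal note on the accounting above: every surviving from-space object was copied, so the
// reclaimed amount is simply the from-space totals minus the moved totals. Unevacuated from-space
// regions are excluded, since their survivors were marked in place rather than copied.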

// Assert the to-space invariant.
void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                               mirror::Object* ref) {
  CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
  if (is_asserting_to_space_invariant_) {
    if (region_space_->IsInToSpace(ref)) {
      // OK.
      return;
    } else if (region_space_->IsInUnevacFromSpace(ref)) {
      CHECK(region_space_bitmap_->Test(ref)) << ref;
    } else if (region_space_->IsInFromSpace(ref)) {
      // Not OK. Do extra logging.
      if (obj != nullptr) {
        LogFromSpaceRefHolder(obj, offset);
      }
      ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
      CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
    } else {
      AssertToSpaceInvariantInNonMovingSpace(obj, ref);
    }
  }
}

class RootPrinter {
 public:
  RootPrinter() { }

  template <class MirrorType>
  ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  template <class MirrorType>
  void VisitRoot(mirror::Object** root)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << *root;
  }

  template <class MirrorType>
  void VisitRoot(mirror::CompressedReference<MirrorType>* root)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << root->AsMirrorPtr();
  }
};

void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source,
                                               mirror::Object* ref) {
  CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
  if (is_asserting_to_space_invariant_) {
    if (region_space_->IsInToSpace(ref)) {
      // OK.
      return;
    } else if (region_space_->IsInUnevacFromSpace(ref)) {
      CHECK(region_space_bitmap_->Test(ref)) << ref;
    } else if (region_space_->IsInFromSpace(ref)) {
      // Not OK. Do extra logging.
      if (gc_root_source == nullptr) {
        // No info.
      } else if (gc_root_source->HasArtField()) {
        ArtField* field = gc_root_source->GetArtField();
        LOG(INTERNAL_FATAL) << "gc root in field " << field << " " << PrettyField(field);
        RootPrinter root_printer;
        field->VisitRoots(root_printer);
      } else if (gc_root_source->HasArtMethod()) {
        ArtMethod* method = gc_root_source->GetArtMethod();
        LOG(INTERNAL_FATAL) << "gc root in method " << method << " " << PrettyMethod(method);
        RootPrinter root_printer;
        method->VisitRoots(root_printer, sizeof(void*));
      }
      ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
      region_space_->DumpNonFreeRegions(LOG(INTERNAL_FATAL));
      PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
      MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
      CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
    } else {
      AssertToSpaceInvariantInNonMovingSpace(nullptr, ref);
    }
  }
}

void ConcurrentCopying::LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) {
  if (kUseBakerReadBarrier) {
    LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj)
              << " holder rb_ptr=" << obj->GetReadBarrierPointer();
  } else {
    LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj);
  }
  if (region_space_->IsInFromSpace(obj)) {
    LOG(INFO) << "holder is in the from-space.";
  } else if (region_space_->IsInToSpace(obj)) {
    LOG(INFO) << "holder is in the to-space.";
  } else if (region_space_->IsInUnevacFromSpace(obj)) {
    LOG(INFO) << "holder is in the unevac from-space.";
    if (region_space_bitmap_->Test(obj)) {
      LOG(INFO) << "holder is marked in the region space bitmap.";
    } else {
      LOG(INFO) << "holder is not marked in the region space bitmap.";
    }
  } else {
    // In a non-moving space.
    if (immune_spaces_.ContainsObject(obj)) {
      LOG(INFO) << "holder is in an immune image or the zygote space.";
    } else {
      LOG(INFO) << "holder is in a non-immune, non-moving (or main) space.";
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
      accounting::LargeObjectBitmap* los_bitmap =
          heap_mark_bitmap_->GetLargeObjectBitmap(obj);
      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
      bool is_los = mark_bitmap == nullptr;
      if (!is_los && mark_bitmap->Test(obj)) {
        LOG(INFO) << "holder is marked in the mark bit map.";
      } else if (is_los && los_bitmap->Test(obj)) {
        LOG(INFO) << "holder is marked in the los bit map.";
      } else {
        // If ref is on the allocation stack, then it is considered
        // marked/alive (but not necessarily on the live stack).
        if (IsOnAllocStack(obj)) {
          LOG(INFO) << "holder is on the alloc stack.";
        } else {
          LOG(INFO) << "holder is not marked or on the alloc stack.";
        }
      }
    }
  }
  LOG(INFO) << "offset=" << offset.SizeValue();
}

void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj,
                                                               mirror::Object* ref) {
  // In a non-moving space. Check that the ref is marked.
  if (immune_spaces_.ContainsObject(ref)) {
    if (kUseBakerReadBarrier) {
      // Immune object may not be gray if called from the GC.
      if (Thread::Current() == thread_running_gc_ && !gc_grays_immune_objects_) {
        return;
      }
      bool updated_all_immune_objects = updated_all_immune_objects_.LoadSequentiallyConsistent();
      CHECK(updated_all_immune_objects || ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
          << "Unmarked immune space ref. obj=" << obj << " rb_ptr="
          << (obj != nullptr ? obj->GetReadBarrierPointer() : nullptr)
          << " ref=" << ref << " ref rb_ptr=" << ref->GetReadBarrierPointer()
          << " updated_all_immune_objects=" << updated_all_immune_objects;
    }
  } else {
    accounting::ContinuousSpaceBitmap* mark_bitmap =
        heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
    accounting::LargeObjectBitmap* los_bitmap =
        heap_mark_bitmap_->GetLargeObjectBitmap(ref);
    CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
    bool is_los = mark_bitmap == nullptr;
    if ((!is_los && mark_bitmap->Test(ref)) ||
        (is_los && los_bitmap->Test(ref))) {
      // OK.
    } else {
      // If ref is on the allocation stack, then it may not be
      // marked live, but considered marked/alive (but not
      // necessarily on the live stack).
      CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. "
                                 << "obj=" << obj << " ref=" << ref;
    }
  }
}

// Used to scan ref fields of an object.
class ConcurrentCopying::RefFieldsVisitor {
 public:
  explicit RefFieldsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
      const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_)
      SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
    collector_->Process(obj, offset);
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    collector_->DelayReferenceReferent(klass, ref);
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      ALWAYS_INLINE
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      ALWAYS_INLINE
      SHARED_REQUIRES(Locks::mutator_lock_) {
    collector_->MarkRoot</*kGrayImmuneObject*/false>(root);
  }

 private:
  ConcurrentCopying* const collector_;
};

// Scan ref fields of an object.
inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
  if (kDisallowReadBarrierDuringScan) {
    // Avoid all read barriers while visiting references, to help performance.
    Thread::Current()->ModifyDebugDisallowReadBarrier(1);
  }
  DCHECK(!region_space_->IsInFromSpace(to_ref));
  DCHECK_EQ(Thread::Current(), thread_running_gc_);
  RefFieldsVisitor visitor(this);
  // Disable the read barrier for a performance reason.
  to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
      visitor, visitor);
  if (kDisallowReadBarrierDuringScan) {
    Thread::Current()->ModifyDebugDisallowReadBarrier(-1);
  }
}

// Process a field.
inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
  DCHECK_EQ(Thread::Current(), thread_running_gc_);
  mirror::Object* ref = obj->GetFieldObject<
      mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
  mirror::Object* to_ref = Mark</*kGrayImmuneObject*/false>(ref);
  if (to_ref == ref) {
    return;
  }
  // This may fail if the mutator writes to the field at the same time. But it's ok.
  mirror::Object* expected_ref = ref;
  mirror::Object* new_ref = to_ref;
  do {
    if (expected_ref !=
        obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) {
      // It was updated by the mutator.
      break;
    }
  } while (!obj->CasFieldWeakRelaxedObjectWithoutWriteBarrier<
      false, false, kVerifyNone>(offset, expected_ref, new_ref));
}
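
// Informal note on the CAS loop above: the GC only "heals" a field from a from-space reference to
// the corresponding to-space reference. If the CAS fails because a mutator stored a different
// reference, giving up is safe: the mutator's read barrier already guarantees that any reference
// it stores is a to-space reference.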

// Process some roots.
inline void ConcurrentCopying::VisitRoots(
    mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    mirror::Object** root = roots[i];
    mirror::Object* ref = *root;
    mirror::Object* to_ref = Mark(ref);
    if (to_ref == ref) {
      continue;
    }
    Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
    mirror::Object* expected_ref = ref;
    mirror::Object* new_ref = to_ref;
    do {
      if (expected_ref != addr->LoadRelaxed()) {
        // It was updated by the mutator.
        break;
      }
    } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
  }
}

template<bool kGrayImmuneObject>
inline void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) {
  DCHECK(!root->IsNull());
  mirror::Object* const ref = root->AsMirrorPtr();
  mirror::Object* to_ref = Mark<kGrayImmuneObject>(ref);
  if (to_ref != ref) {
    auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
    auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
    auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
    // If the CAS fails, then it was updated by the mutator.
    do {
      if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
        // It was updated by the mutator.
        break;
      }
    } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
  }
}

inline void ConcurrentCopying::VisitRoots(
    mirror::CompressedReference<mirror::Object>** roots, size_t count,
    const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    mirror::CompressedReference<mirror::Object>* const root = roots[i];
    if (!root->IsNull()) {
      // kGrayImmuneObject is true because this is used for the thread flip.
      MarkRoot</*kGrayImmuneObject*/true>(root);
    }
  }
}

// Temporarily set gc_grays_immune_objects_ to true in a scope if the current thread is the one
// running the GC.
class ConcurrentCopying::ScopedGcGraysImmuneObjects {
 public:
  explicit ScopedGcGraysImmuneObjects(ConcurrentCopying* collector)
      : collector_(collector), enabled_(false) {
    if (kUseBakerReadBarrier &&
        collector_->thread_running_gc_ == Thread::Current() &&
        !collector_->gc_grays_immune_objects_) {
      collector_->gc_grays_immune_objects_ = true;
      enabled_ = true;
    }
  }

  ~ScopedGcGraysImmuneObjects() {
    if (kUseBakerReadBarrier &&
        collector_->thread_running_gc_ == Thread::Current() &&
        enabled_) {
      DCHECK(collector_->gc_grays_immune_objects_);
      collector_->gc_grays_immune_objects_ = false;
    }
  }

 private:
  ConcurrentCopying* const collector_;
  bool enabled_;
};

// Fill the given memory block with a dummy object. Used to fill in a copy of an object that was
// lost in a race.
void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
  // GC doesn't gray immune objects while scanning immune objects. But we need to trigger the read
  // barriers here because we need the updated reference to the int array class, etc. Temporarily
  // set gc_grays_immune_objects_ to true so that we won't cause a DCHECK failure in
  // MarkImmuneSpace().
  ScopedGcGraysImmuneObjects scoped_gc_gray_immune_objects(this);
  CHECK_ALIGNED(byte_size, kObjectAlignment);
  memset(dummy_obj, 0, byte_size);
  // Avoid going through the read barrier, since kDisallowReadBarrierDuringScan may be enabled.
  // Explicitly mark to make sure to get an object in the to-space.
  mirror::Class* int_array_class = down_cast<mirror::Class*>(
      Mark(mirror::IntArray::GetArrayClass<kWithoutReadBarrier>()));
  CHECK(int_array_class != nullptr);
  AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
  size_t component_size = int_array_class->GetComponentSize<kWithoutReadBarrier>();
  CHECK_EQ(component_size, sizeof(int32_t));
  size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
  if (data_offset > byte_size) {
    // An int array is too big. Use java.lang.Object.
    mirror::Class* java_lang_Object = WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object);
    AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object);
    CHECK_EQ(byte_size, (java_lang_Object->GetObjectSize<kVerifyNone, kWithoutReadBarrier>()));
    dummy_obj->SetClass(java_lang_Object);
    CHECK_EQ(byte_size, (dummy_obj->SizeOf<kVerifyNone, kWithoutReadBarrier>()));
  } else {
    // Use an int array.
    dummy_obj->SetClass(int_array_class);
    CHECK((dummy_obj->IsArrayInstance<kVerifyNone, kWithoutReadBarrier>()));
    int32_t length = (byte_size - data_offset) / component_size;
    mirror::Array* dummy_arr = dummy_obj->AsArray<kVerifyNone, kWithoutReadBarrier>();
    dummy_arr->SetLength(length);
    CHECK_EQ(dummy_arr->GetLength(), length)
        << "byte_size=" << byte_size << " length=" << length
        << " component_size=" << component_size << " data_offset=" << data_offset;
    CHECK_EQ(byte_size, (dummy_obj->SizeOf<kVerifyNone, kWithoutReadBarrier>()))
        << "byte_size=" << byte_size << " length=" << length
        << " component_size=" << component_size << " data_offset=" << data_offset;
  }
}
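
// Worked example (illustrative only; assumes a 12-byte int array data offset): filling a 32-byte
// hole picks the int array case since data_offset (12) <= 32, giving
// length = (32 - 12) / 4 = 5, i.e. a dead int[5] that keeps the heap parsable for bitmap and
// card table walkers.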

// Reuse the memory blocks that were copies of objects that were lost in the race.
mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
  // Try to reuse the blocks that were unused due to CAS failures.
  CHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment);
  Thread* self = Thread::Current();
  size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
  size_t byte_size;
  uint8_t* addr;
  {
    MutexLock mu(self, skipped_blocks_lock_);
    auto it = skipped_blocks_map_.lower_bound(alloc_size);
    if (it == skipped_blocks_map_.end()) {
      // Not found.
      return nullptr;
    }
    byte_size = it->first;
    CHECK_GE(byte_size, alloc_size);
    if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
      // If the remainder would be too small for a dummy object, retry with a larger request size.
      it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
      if (it == skipped_blocks_map_.end()) {
        // Not found.
        return nullptr;
      }
      CHECK_ALIGNED(it->first - alloc_size, space::RegionSpace::kAlignment);
      CHECK_GE(it->first - alloc_size, min_object_size)
          << "byte_size=" << byte_size << " it->first=" << it->first
          << " alloc_size=" << alloc_size;
    }
    // Found a block.
    CHECK(it != skipped_blocks_map_.end());
    byte_size = it->first;
    addr = it->second;
    CHECK_GE(byte_size, alloc_size);
    CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
    CHECK_ALIGNED(byte_size, space::RegionSpace::kAlignment);
    if (kVerboseMode) {
      LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
    }
    skipped_blocks_map_.erase(it);
  }
  memset(addr, 0, byte_size);
  if (byte_size > alloc_size) {
    // Return the remainder to the map.
    CHECK_ALIGNED(byte_size - alloc_size, space::RegionSpace::kAlignment);
    CHECK_GE(byte_size - alloc_size, min_object_size);
    // FillWithDummyObject may mark an object, so avoid holding skipped_blocks_lock_ to prevent a
    // lock violation and possible deadlock. The deadlock case is a recursive one:
    // FillWithDummyObject -> IntArray::GetArrayClass -> Mark -> Copy -> AllocateInSkippedBlock.
    FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
                        byte_size - alloc_size);
    CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
    {
      MutexLock mu(self, skipped_blocks_lock_);
      skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
    }
  }
  return reinterpret_cast<mirror::Object*>(addr);
}
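
// Informal note: skipped_blocks_map_ is keyed by block size, so the lower_bound() calls above act
// as a best-fit lookup. Any leftover tail of a block is either re-inserted for future reuse or,
// when it would be smaller than min_object_size, avoided up front by retrying with a padded
// request size.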

mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  // No read barrier to avoid nested RB that might violate the to-space
  // invariant. Note that from_ref is a from space ref so the SizeOf()
  // call will access the from-space meta objects, but it's ok and necessary.
  size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
  size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
  size_t region_space_bytes_allocated = 0U;
  size_t non_moving_space_bytes_allocated = 0U;
  size_t bytes_allocated = 0U;
  size_t dummy;
  mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
      region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
  bytes_allocated = region_space_bytes_allocated;
  if (to_ref != nullptr) {
    DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
  }
  bool fall_back_to_non_moving = false;
  if (UNLIKELY(to_ref == nullptr)) {
    // Failed to allocate in the region space. Try the skipped blocks.
    to_ref = AllocateInSkippedBlock(region_space_alloc_size);
    if (to_ref != nullptr) {
      // Succeeded to allocate in a skipped block.
      if (heap_->use_tlab_) {
        // This is necessary for the tlab case as it's not accounted in the space.
        region_space_->RecordAlloc(to_ref);
      }
      bytes_allocated = region_space_alloc_size;
    } else {
      // Fall back to the non-moving space.
      fall_back_to_non_moving = true;
      if (kVerboseMode) {
        LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
                  << to_space_bytes_skipped_.LoadSequentiallyConsistent()
                  << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent();
      }
      to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
                                               &non_moving_space_bytes_allocated, nullptr, &dummy);
      CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed";
      bytes_allocated = non_moving_space_bytes_allocated;
      // Mark it in the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
      CHECK(mark_bitmap != nullptr);
      CHECK(!mark_bitmap->AtomicTestAndSet(to_ref));
    }
  }
  DCHECK(to_ref != nullptr);

  // Attempt to install the forward pointer. This is in a loop as the
  // lock word atomic write can fail.
  while (true) {
    // Copy the object. TODO: copy only the lockword in the second iteration and on?
    memcpy(to_ref, from_ref, obj_size);

    LockWord old_lock_word = to_ref->GetLockWord(false);

    if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
      // Lost the race. Another thread (either GC or mutator) stored
      // the forwarding pointer first. Make the lost copy (to_ref)
      // look like a valid but dead (dummy) object and keep it for
      // future reuse.
      FillWithDummyObject(to_ref, bytes_allocated);
      if (!fall_back_to_non_moving) {
        DCHECK(region_space_->IsInToSpace(to_ref));
        if (bytes_allocated > space::RegionSpace::kRegionSize) {
          // Free the large alloc.
          region_space_->FreeLarge(to_ref, bytes_allocated);
        } else {
          // Record the lost copy for later reuse.
          heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated);
          to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated);
          to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1);
          MutexLock mu(Thread::Current(), skipped_blocks_lock_);
          skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
                                                    reinterpret_cast<uint8_t*>(to_ref)));
        }
      } else {
        DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
        DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
        // Free the non-moving-space chunk.
        accounting::ContinuousSpaceBitmap* mark_bitmap =
            heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
        CHECK(mark_bitmap != nullptr);
        CHECK(mark_bitmap->Clear(to_ref));
        heap_->non_moving_space_->Free(Thread::Current(), to_ref);
      }

      // Get the winner's forward ptr.
      mirror::Object* lost_fwd_ptr = to_ref;
      to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress());
      CHECK(to_ref != nullptr);
      CHECK_NE(to_ref, lost_fwd_ptr);
      CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref));
      CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
      return to_ref;
    }

    // Set the gray ptr.
    if (kUseBakerReadBarrier) {
      to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr());
    }

    LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));

    // Try to atomically write the fwd ptr.
    bool success = from_ref->CasLockWordWeakSequentiallyConsistent(old_lock_word, new_lock_word);
    if (LIKELY(success)) {
      // The CAS succeeded.
      objects_moved_.FetchAndAddSequentiallyConsistent(1);
      bytes_moved_.FetchAndAddSequentiallyConsistent(region_space_alloc_size);
      if (LIKELY(!fall_back_to_non_moving)) {
        DCHECK(region_space_->IsInToSpace(to_ref));
      } else {
        DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
        DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
      }
      if (kUseBakerReadBarrier) {
        DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      }
      DCHECK(GetFwdPtr(from_ref) == to_ref);
      CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
      PushOntoMarkStack(to_ref);
      return to_ref;
    } else {
      // The CAS failed. It may have lost the race or may have failed
      // due to monitor/hashcode ops. Either way, retry.
    }
  }
}
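
// Informal note on Copy() above: the from-space object's lock word doubles as the forwarding
// pointer, so the winner of the CasLockWordWeakSequentiallyConsistent() race publishes its copy;
// every loser turns its duplicate into a dummy object (or frees it) and adopts the winner's
// to_ref via ForwardingAddress().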

mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
  DCHECK(from_ref != nullptr);
  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
  if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
    // It's already marked.
    return from_ref;
  }
  mirror::Object* to_ref;
  if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
    to_ref = GetFwdPtr(from_ref);
    DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) ||
           heap_->non_moving_space_->HasAddress(to_ref))
        << "from_ref=" << from_ref << " to_ref=" << to_ref;
  } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
    if (region_space_bitmap_->Test(from_ref)) {
      to_ref = from_ref;
    } else {
      to_ref = nullptr;
    }
  } else {
    // from_ref is in a non-moving space.
    if (immune_spaces_.ContainsObject(from_ref)) {
      // An immune object is alive.
      to_ref = from_ref;
    } else {
      // Non-immune non-moving space. Use the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
      accounting::LargeObjectBitmap* los_bitmap =
          heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
      bool is_los = mark_bitmap == nullptr;
      if (!is_los && mark_bitmap->Test(from_ref)) {
        // Already marked.
        to_ref = from_ref;
      } else if (is_los && los_bitmap->Test(from_ref)) {
        // Already marked in LOS.
        to_ref = from_ref;
      } else {
        // Not marked.
        if (IsOnAllocStack(from_ref)) {
          // If on the allocation stack, it's considered marked.
          to_ref = from_ref;
        } else {
          // Not marked.
          to_ref = nullptr;
        }
      }
    }
  }
  return to_ref;
}

bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
  QuasiAtomic::ThreadFenceAcquire();
  accounting::ObjectStack* alloc_stack = GetAllocationStack();
  return alloc_stack->Contains(ref);
}

mirror::Object* ConcurrentCopying::MarkNonMoving(mirror::Object* ref) {
  // ref is in a non-moving space (from_ref == to_ref).
  DCHECK(!region_space_->HasAddress(ref)) << ref;
  DCHECK(!immune_spaces_.ContainsObject(ref));
  // Use the mark bitmap.
  accounting::ContinuousSpaceBitmap* mark_bitmap =
      heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
  accounting::LargeObjectBitmap* los_bitmap =
      heap_mark_bitmap_->GetLargeObjectBitmap(ref);
  CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
  bool is_los = mark_bitmap == nullptr;
  if (!is_los && mark_bitmap->Test(ref)) {
    // Already marked.
    if (kUseBakerReadBarrier) {
      DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
             ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
    }
  } else if (is_los && los_bitmap->Test(ref)) {
    // Already marked in LOS.
    if (kUseBakerReadBarrier) {
      DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
             ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
    }
  } else {
    // Not marked.
    if (IsOnAllocStack(ref)) {
      // If it's on the allocation stack, it's considered marked. Keep it white.
      // Objects on the allocation stack need not be marked.
      if (!is_los) {
        DCHECK(!mark_bitmap->Test(ref));
      } else {
        DCHECK(!los_bitmap->Test(ref));
      }
      if (kUseBakerReadBarrier) {
        DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr());
      }
    } else {
      // For the Baker-style RB, we need to handle 'false-gray' cases. See the
      // kRegionTypeUnevacFromSpace-case comment in Mark().
      if (kUseBakerReadBarrier) {
        // Test the bitmap first to reduce the chance of false gray cases.
        if ((!is_los && mark_bitmap->Test(ref)) ||
            (is_los && los_bitmap->Test(ref))) {
          return ref;
        }
      }
      // Not marked or on the allocation stack. Try to mark it.
      // This may or may not succeed, which is ok.
      bool cas_success = false;
      if (kUseBakerReadBarrier) {
        cas_success = ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(),
                                                       ReadBarrier::GrayPtr());
      }
      if (!is_los && mark_bitmap->AtomicTestAndSet(ref)) {
        // Already marked.
        if (kUseBakerReadBarrier && cas_success &&
            ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
          PushOntoFalseGrayStack(ref);
        }
      } else if (is_los && los_bitmap->AtomicTestAndSet(ref)) {
        // Already marked in LOS.
        if (kUseBakerReadBarrier && cas_success &&
            ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
          PushOntoFalseGrayStack(ref);
        }
      } else {
        // Newly marked.
        if (kUseBakerReadBarrier) {
          DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
        }
        PushOntoMarkStack(ref);
      }
    }
  }
  return ref;
}
2214
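// Post-collection cleanup: checks that all pooled mark stacks were returned, drops
// per-collection state, clears the mark bitmaps for the next cycle, and optionally
// filters mod-union table cards for the immune spaces.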
void ConcurrentCopying::FinishPhase() {
  Thread* const self = Thread::Current();
  {
    MutexLock mu(self, mark_stack_lock_);
    CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
  }
  region_space_ = nullptr;
  {
    MutexLock mu(self, skipped_blocks_lock_);
    skipped_blocks_map_.clear();
  }
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    {
      WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
      heap_->ClearMarkedObjects();
    }
    if (kUseBakerReadBarrier && kFilterModUnionCards) {
      TimingLogger::ScopedTiming split("FilterModUnionCards", GetTimings());
      ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
      gc::Heap* const heap = Runtime::Current()->GetHeap();
      for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
        DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
        accounting::ModUnionTable* table = heap->FindModUnionTableFromSpace(space);
        // Filter out cards that don't need to be set.
        if (table != nullptr) {
          table->FilterCards();
        }
      }
    }
  }
  if (measure_read_barrier_slow_path_) {
    MutexLock mu(self, rb_slow_path_histogram_lock_);
    rb_slow_path_time_histogram_.AdjustAndAddValue(rb_slow_path_ns_.LoadRelaxed());
    rb_slow_path_count_total_ += rb_slow_path_count_.LoadRelaxed();
    rb_slow_path_count_gc_total_ += rb_slow_path_count_gc_.LoadRelaxed();
  }
}

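// Tests whether the object a heap reference field points to is marked; if the object has
// been moved, also forwards the field to the to-space address.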
bool ConcurrentCopying::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) {
  mirror::Object* from_ref = field->AsMirrorPtr();
  mirror::Object* to_ref = IsMarked(from_ref);
  if (to_ref == nullptr) {
    return false;
  }
  if (from_ref != to_ref) {
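    // The release fence orders initialization of the to-space copy before the reference
    // store below; the trailing fence orders the store before later accesses.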
    QuasiAtomic::ThreadFenceRelease();
    field->Assign(to_ref);
    QuasiAtomic::ThreadFenceSequentiallyConsistent();
  }
  return true;
}

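// Generic marking callback used by the reference processor and other visitors; defers to
// the common Mark() path.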
mirror::Object* ConcurrentCopying::MarkObject(mirror::Object* from_ref) {
  return Mark(from_ref);
}

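// Defers processing of a reference object whose referent may be unreachable to the
// reference processor, which resolves it after marking completes.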
void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}

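// Runs java.lang.ref reference processing concurrently with the mutators, using this
// collector's callbacks to test and forward referents.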
void ConcurrentCopying::ProcessReferences(Thread* self) {
  TimingLogger::ScopedTiming split("ProcessReferences", GetTimings());
  // We don't really need to lock the heap bitmap lock as we use CAS to mark in bitmaps.
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}

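// Returns outstanding thread-local allocation buffers to the region space so that their
// bytes are properly accounted for.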
void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  region_space_->RevokeAllThreadLocalBuffers();
}

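// Instrumented read barrier slow path: counts slow-path entries (mutator vs. GC thread)
// and, when measurement is enabled, accumulates the time spent in Mark().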
mirror::Object* ConcurrentCopying::MarkFromReadBarrierWithMeasurements(mirror::Object* from_ref) {
  if (Thread::Current() != thread_running_gc_) {
    rb_slow_path_count_.FetchAndAddRelaxed(1u);
  } else {
    rb_slow_path_count_gc_.FetchAndAddRelaxed(1u);
  }
  ScopedTrace tr(__FUNCTION__);
  const uint64_t start_time = measure_read_barrier_slow_path_ ? NanoTime() : 0u;
  mirror::Object* ret = Mark(from_ref);
  if (measure_read_barrier_slow_path_) {
    rb_slow_path_ns_.FetchAndAddRelaxed(NanoTime() - start_time);
  }
  return ret;
}

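// Appends read barrier slow-path statistics (time histogram and hit counts) to the
// standard collector performance dump.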
void ConcurrentCopying::DumpPerformanceInfo(std::ostream& os) {
  GarbageCollector::DumpPerformanceInfo(os);
  MutexLock mu(Thread::Current(), rb_slow_path_histogram_lock_);
  if (rb_slow_path_time_histogram_.SampleSize() > 0) {
    Histogram<uint64_t>::CumulativeData cumulative_data;
    rb_slow_path_time_histogram_.CreateHistogram(&cumulative_data);
    rb_slow_path_time_histogram_.PrintConfidenceIntervals(os, 0.99, cumulative_data);
  }
  if (rb_slow_path_count_total_ > 0) {
    os << "Slow path count " << rb_slow_path_count_total_ << "\n";
  }
  if (rb_slow_path_count_gc_total_ > 0) {
    os << "GC slow path count " << rb_slow_path_count_gc_total_ << "\n";
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art