/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "concurrent_copying.h"

#include "art_field-inl.h"
#include "base/histogram-inl.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "debugger.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
#include "image-inl.h"
#include "intern_table.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"

namespace art {
namespace gc {
namespace collector {

static constexpr size_t kDefaultGcMarkStackSize = 2 * MB;
// If kGrayDirtyImmuneObjects is true then we gray dirty objects in the GC pause to minimize the
// number of pages that get dirtied in the immune spaces.
static constexpr bool kGrayDirtyImmuneObjects = true;
// If kFilterModUnionCards is true then we attempt to filter cards that don't need to be dirty in
// the mod union table. Disabled since it does not seem to help the pause much.
static constexpr bool kFilterModUnionCards = kIsDebugBuild;

ConcurrentCopying::ConcurrentCopying(Heap* heap,
                                     const std::string& name_prefix,
                                     bool measure_read_barrier_slow_path)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       "concurrent copying + mark sweep"),
      region_space_(nullptr), gc_barrier_(new Barrier(0)),
      gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack",
                                                     kDefaultGcMarkStackSize,
                                                     kDefaultGcMarkStackSize)),
      mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
      thread_running_gc_(nullptr),
      is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
      region_space_bitmap_(nullptr),
      heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0), mark_stack_mode_(kMarkStackModeOff),
      weak_ref_access_enabled_(true),
      skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
      measure_read_barrier_slow_path_(measure_read_barrier_slow_path),
      rb_slow_path_ns_(0),
      rb_slow_path_count_(0),
      rb_slow_path_count_gc_(0),
      rb_slow_path_histogram_lock_("Read barrier histogram lock"),
      rb_slow_path_time_histogram_("Mutator time in read barrier slow path", 500, 32),
      rb_slow_path_count_total_(0),
      rb_slow_path_count_gc_total_(0),
      rb_table_(heap_->GetReadBarrierTable()),
      force_evacuate_all_(false),
      immune_gray_stack_lock_("concurrent copying immune gray stack lock",
                              kMarkSweepMarkStackLock) {
  static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
                "The region space size and the read barrier table region size must match");
  Thread* self = Thread::Current();
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Cache this so that we won't have to lock heap_bitmap_lock_ in Mark(), which could cause a
    // nested acquisition of heap_bitmap_lock_ when the GC itself triggers a read barrier, or a
    // lock order violation (class_linker_lock_ and heap_bitmap_lock_).
    heap_mark_bitmap_ = heap->GetMarkBitmap();
  }
  {
    MutexLock mu(self, mark_stack_lock_);
    for (size_t i = 0; i < kMarkStackPoolSize; ++i) {
      accounting::AtomicStack<mirror::Object>* mark_stack =
          accounting::AtomicStack<mirror::Object>::Create(
              "thread local mark stack", kMarkStackSize, kMarkStackSize);
      pooled_mark_stacks_.push_back(mark_stack);
    }
  }
}

void ConcurrentCopying::MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) {
  // Used for preserving soft references; it should be OK not to have a CAS here since no other
  // thread can trigger read barriers on the same referent during reference processing.
  from_ref->Assign(Mark(from_ref->AsMirrorPtr()));
  DCHECK(!from_ref->IsNull());
}

ConcurrentCopying::~ConcurrentCopying() {
  STLDeleteElements(&pooled_mark_stacks_);
}

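// Top-level driver of a GC cycle: initialize, flip the thread roots in a pause, mark
// concurrently, optionally verify that no from-space refs remain, then reclaim and finish.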
void ConcurrentCopying::RunPhases() {
  CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier);
  CHECK(!is_active_);
  is_active_ = true;
  Thread* self = Thread::Current();
  thread_running_gc_ = self;
  Locks::mutator_lock_->AssertNotHeld(self);
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    InitializePhase();
  }
  FlipThreadRoots();
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    MarkingPhase();
  }
  // Verify no from space refs. This causes a pause.
  if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) {
    TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
    ScopedPause pause(this);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "Verifying no from-space refs";
    }
    VerifyNoFromSpaceReferences();
    if (kVerboseMode) {
      LOG(INFO) << "Done verifying no from-space refs";
    }
    CheckEmptyMarkStack();
  }
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  FinishPhase();
  CHECK(is_active_);
  is_active_ = false;
  thread_running_gc_ = nullptr;
}

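// Set up the spaces for this GC: non-region spaces (image/zygote) become immune, and a mark
// bitmap is created for the region space.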
void ConcurrentCopying::BindBitmaps() {
  Thread* self = Thread::Current();
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      CHECK(space->IsZygoteSpace() || space->IsImageSpace());
      immune_spaces_.AddSpace(space);
    } else if (space == region_space_) {
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create("cc region space bitmap",
                                                    space->Begin(), space->Capacity());
      region_space_bitmap_ = bitmap;
    }
  }
}

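// Reset the per-GC state (counters, immune spaces, evacuation mode, read barrier measurements)
// before the thread flip.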
void ConcurrentCopying::InitializePhase() {
  TimingLogger::ScopedTiming split("InitializePhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC InitializePhase";
    LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-"
              << reinterpret_cast<void*>(region_space_->Limit());
  }
  CheckEmptyMarkStack();
  if (kIsDebugBuild) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    CHECK(false_gray_stack_.empty());
  }

  mark_from_read_barrier_measurements_ = measure_read_barrier_slow_path_;
  if (measure_read_barrier_slow_path_) {
    rb_slow_path_ns_.StoreRelaxed(0);
    rb_slow_path_count_.StoreRelaxed(0);
    rb_slow_path_count_gc_.StoreRelaxed(0);
  }

  immune_spaces_.Reset();
  bytes_moved_.StoreRelaxed(0);
  objects_moved_.StoreRelaxed(0);
  if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
      GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
      GetCurrentIteration()->GetClearSoftReferences()) {
    force_evacuate_all_ = true;
  } else {
    force_evacuate_all_ = false;
  }
  if (kUseBakerReadBarrier) {
    updated_all_immune_objects_.StoreRelaxed(false);
    // GC may gray immune objects in the thread flip.
    gc_grays_immune_objects_ = true;
    if (kIsDebugBuild) {
      MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
      DCHECK(immune_gray_stack_.empty());
    }
  }
  BindBitmaps();
  if (kVerboseMode) {
    LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_;
    LOG(INFO) << "Largest immune region: " << immune_spaces_.GetLargestImmuneRegion().Begin()
              << "-" << immune_spaces_.GetLargestImmuneRegion().End();
    for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
      LOG(INFO) << "Immune space: " << *space;
    }
    LOG(INFO) << "GC end of InitializePhase";
  }
}

// Used to switch the thread roots of a thread from from-space refs to to-space refs.
class ConcurrentCopying::ThreadFlipVisitor : public Closure, public RootVisitor {
 public:
  ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
      : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
  }

  virtual void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->SetIsGcMarking(true);
    if (use_tlab_ && thread->HasTlab()) {
      if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
        // This must come before the revoke.
        size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
        reinterpret_cast<Atomic<size_t>*>(&concurrent_copying_->from_space_num_objects_at_first_pause_)->
            FetchAndAddSequentiallyConsistent(thread_local_objects);
      } else {
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
      }
    }
    if (kUseThreadLocalAllocationStack) {
      thread->RevokeThreadLocalAllocationStack();
    }
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // We can use the non-CAS VisitRoots functions below because we update thread-local GC roots
    // only.
    thread->VisitRoots(this);
    concurrent_copying_->GetBarrier().Pass(self);
  }

  void VisitRoots(mirror::Object*** roots,
                  size_t count,
                  const RootInfo& info ATTRIBUTE_UNUSED)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    for (size_t i = 0; i < count; ++i) {
      mirror::Object** root = roots[i];
      mirror::Object* ref = *root;
      if (ref != nullptr) {
        mirror::Object* to_ref = concurrent_copying_->Mark(ref);
        if (to_ref != ref) {
          *root = to_ref;
        }
      }
    }
  }

  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                  size_t count,
                  const RootInfo& info ATTRIBUTE_UNUSED)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    for (size_t i = 0; i < count; ++i) {
      mirror::CompressedReference<mirror::Object>* const root = roots[i];
      if (!root->IsNull()) {
        mirror::Object* ref = root->AsMirrorPtr();
        mirror::Object* to_ref = concurrent_copying_->Mark(ref);
        if (to_ref != ref) {
          root->Assign(to_ref);
        }
      }
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool use_tlab_;
};

// Called back from Runtime::FlipThreadRoots() during a pause.
class ConcurrentCopying::FlipCallback : public Closure {
 public:
  explicit FlipCallback(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE REQUIRES(Locks::mutator_lock_) {
    ConcurrentCopying* cc = concurrent_copying_;
    TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self);
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
    cc->SwapStacks();
    if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
      cc->RecordLiveStackFreezeSize(self);
      cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated();
      cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
    }
    cc->is_marking_ = true;
    cc->mark_stack_mode_.StoreRelaxed(ConcurrentCopying::kMarkStackModeThreadLocal);
    if (kIsDebugBuild) {
      cc->region_space_->AssertAllRegionLiveBytesZeroOrCleared();
    }
    if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
      CHECK(Runtime::Current()->IsAotCompiler());
      TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
      Runtime::Current()->VisitTransactionRoots(cc);
    }
    if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
      cc->GrayAllDirtyImmuneObjects();
      if (kIsDebugBuild) {
        // Check that all non-gray immune objects only reference immune objects.
        cc->VerifyGrayImmuneObjects();
      }
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

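// Visitor used by VerifyGrayImmuneObjects() to check that a non-gray immune object only
// references objects in the immune spaces.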
class ConcurrentCopying::VerifyGrayImmuneObjectsVisitor {
 public:
  explicit VerifyGrayImmuneObjectsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
      const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_)
      SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
    CheckReference(obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset),
                   obj, offset);
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    CheckReference(ref->GetReferent<kWithoutReadBarrier>(),
                   ref,
                   mirror::Reference::ReferentOffset());
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      ALWAYS_INLINE
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      ALWAYS_INLINE
      SHARED_REQUIRES(Locks::mutator_lock_) {
    CheckReference(root->AsMirrorPtr(), nullptr, MemberOffset(0));
  }

 private:
  ConcurrentCopying* const collector_;

  void CheckReference(mirror::Object* ref, mirror::Object* holder, MemberOffset offset) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (ref != nullptr) {
      CHECK(collector_->immune_spaces_.ContainsObject(ref))
          << "Non gray object references non immune object " << ref << " " << PrettyTypeOf(ref)
          << " in holder " << holder << " " << PrettyTypeOf(holder) << " offset="
          << offset.Uint32Value();
    }
  }
};

void ConcurrentCopying::VerifyGrayImmuneObjects() {
  TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
  for (auto& space : immune_spaces_.GetSpaces()) {
    DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
    accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
    VerifyGrayImmuneObjectsVisitor visitor(this);
    live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                  reinterpret_cast<uintptr_t>(space->Limit()),
                                  [&visitor](mirror::Object* obj)
        SHARED_REQUIRES(Locks::mutator_lock_) {
      // If an object is not gray, it should only have references to things in the immune spaces.
      if (obj->GetReadBarrierPointer() != ReadBarrier::GrayPtr()) {
        obj->VisitReferences</*kVisitNativeRoots*/true,
                             kDefaultVerifyFlags,
                             kWithoutReadBarrier>(visitor, visitor);
      }
    });
  }
}

// Switch the thread roots of all threads from from-space refs to to-space refs. Forward/mark the
// thread roots.
void ConcurrentCopying::FlipThreadRoots() {
  TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
  }
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  gc_barrier_->Init(self, 0);
  ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
  FlipCallback flip_callback(this);
  heap_->ThreadFlipBegin(self);  // Sync with JNI critical calls.
  size_t barrier_count = Runtime::Current()->FlipThreadRoots(
      &thread_flip_visitor, &flip_callback, this);
  heap_->ThreadFlipEnd(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  is_asserting_to_space_invariant_ = true;
  QuasiAtomic::ThreadFenceForConstructor();
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
    LOG(INFO) << "GC end of FlipThreadRoots";
  }
}

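// Visitor that grays an immune-space object, i.e. sets its read barrier pointer to the gray ptr.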
class ConcurrentCopying::GrayImmuneObjectVisitor {
 public:
  explicit GrayImmuneObjectVisitor() {}

  ALWAYS_INLINE void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) {
    if (kUseBakerReadBarrier) {
      if (kIsDebugBuild) {
        Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
      }
      obj->SetReadBarrierPointer(ReadBarrier::GrayPtr());
    }
  }

  static void Callback(mirror::Object* obj, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) {
    reinterpret_cast<GrayImmuneObjectVisitor*>(arg)->operator()(obj);
  }
};

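// Called during the flip pause: gray the immune-space objects that sit on dirty cards, since
// those are the ones that may point to objects in other spaces.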
void ConcurrentCopying::GrayAllDirtyImmuneObjects() {
  TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
  gc::Heap* const heap = Runtime::Current()->GetHeap();
  accounting::CardTable* const card_table = heap->GetCardTable();
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
    DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
    GrayImmuneObjectVisitor visitor;
    accounting::ModUnionTable* table = heap->FindModUnionTableFromSpace(space);
    // Mark all the objects on dirty cards since these may point to objects in other spaces.
    // Once these are marked, the GC will clear the cards later.
    // The table is non-null for the boot image and zygote spaces. It is only null for application
    // image spaces.
    if (table != nullptr) {
      // TODO: Add preclean outside the pause.
      table->ClearCards();
      table->VisitObjects(GrayImmuneObjectVisitor::Callback, &visitor);
    } else {
      // TODO: Consider having a mark bitmap for app image spaces and avoid scanning during the
      // pause because app image spaces are all dirty pages anyways.
      card_table->Scan<false>(space->GetMarkBitmap(), space->Begin(), space->End(), visitor);
    }
  }
  // Since all of the objects that may point to other spaces are marked, we can avoid all the read
  // barriers in the immune spaces.
  updated_all_immune_objects_.StoreRelaxed(true);
}

void ConcurrentCopying::SwapStacks() {
  heap_->SwapStacks();
}

void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
}

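// A checkpoint that does nothing except pass the GC barrier; used to wait until all running
// threads have reached a synchronization point.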
class EmptyCheckpoint : public Closure {
 public:
  explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Used to visit objects in the immune spaces.
inline void ConcurrentCopying::ScanImmuneObject(mirror::Object* obj) {
  DCHECK(obj != nullptr);
  DCHECK(immune_spaces_.ContainsObject(obj));
  // Update the fields without graying it or pushing it onto the mark stack.
  Scan(obj);
}

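// Visitor that scans the fields of an immune-space object. With Baker read barriers and
// kGrayDirtyImmuneObjects enabled, only gray objects are scanned and they are whitened when done.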
class ConcurrentCopying::ImmuneSpaceScanObjVisitor {
 public:
  explicit ImmuneSpaceScanObjVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}

  ALWAYS_INLINE void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) {
    if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
      if (obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
        collector_->ScanImmuneObject(obj);
        // Done scanning the object, go back to white.
        bool success = obj->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                        ReadBarrier::WhitePtr());
        CHECK(success);
      }
    } else {
      collector_->ScanImmuneObject(obj);
    }
  }

  static void Callback(mirror::Object* obj, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) {
    reinterpret_cast<ImmuneSpaceScanObjVisitor*>(arg)->operator()(obj);
  }

 private:
  ConcurrentCopying* const collector_;
};

// Concurrently mark roots that are guarded by read barriers and process the mark stack.
void ConcurrentCopying::MarkingPhase() {
  TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC MarkingPhase";
  }
  CHECK(weak_ref_access_enabled_);

  // Scan immune spaces.
  // Update all the fields in the immune spaces first without graying the objects so that we
  // minimize dirty pages in the immune spaces. Note mutators can concurrently access and gray
  // some of the objects.
  if (kUseBakerReadBarrier) {
    gc_grays_immune_objects_ = false;
  }
  {
    TimingLogger::ScopedTiming split2("ScanImmuneSpaces", GetTimings());
    for (auto& space : immune_spaces_.GetSpaces()) {
      DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
      accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      ImmuneSpaceScanObjVisitor visitor(this);
      if (table != nullptr) {
        table->VisitObjects(ImmuneSpaceScanObjVisitor::Callback, &visitor);
      } else {
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->Limit()),
                                      visitor);
      }
    }
  }
  if (kUseBakerReadBarrier) {
    // This release fence makes the field updates in the above loop visible before allowing
    // mutators to access immune objects without graying them first.
    updated_all_immune_objects_.StoreRelease(true);
    // Now whiten immune objects concurrently accessed and grayed by mutators. We can't do this in
    // the above loop because we would incorrectly disable the read barrier by whitening an object
    // which may point to an unscanned, white object, breaking the to-space invariant.
    //
    // Make sure no mutators are in the middle of marking an immune object before whitening immune
    // objects.
    IssueEmptyCheckpoint();
    MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
    if (kVerboseMode) {
      LOG(INFO) << "immune gray stack size=" << immune_gray_stack_.size();
    }
    for (mirror::Object* obj : immune_gray_stack_) {
      DCHECK(obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      bool success = obj->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                      ReadBarrier::WhitePtr());
      DCHECK(success);
    }
    immune_gray_stack_.clear();
  }

  {
    TimingLogger::ScopedTiming split2("VisitConcurrentRoots", GetTimings());
    Runtime::Current()->VisitConcurrentRoots(this, kVisitRootFlagAllRoots);
  }
  {
    // TODO: don't visit the transaction roots if it's not active.
    TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
    Runtime::Current()->VisitNonThreadRoots(this);
  }

  Thread* self = Thread::Current();
  {
    TimingLogger::ScopedTiming split7("ProcessMarkStack", GetTimings());
    // We transition through three mark stack modes (thread-local, shared, GC-exclusive). The
    // primary reasons are: we need to use a checkpoint to process thread-local mark stacks; after
    // we disable weak ref accesses, we can't use a checkpoint due to a deadlock issue (running
    // threads may be blocked at WaitHoldingLocks); and once we reach the point where we process
    // weak references, we can avoid using a lock when accessing the GC mark stack, which makes
    // mark stack processing more efficient.

    // Process the mark stack once in the thread local stack mode. This marks most of the live
    // objects, aside from weak ref accesses with read barriers (Reference::GetReferent() and
    // system weaks) that may happen concurrently while we process the mark stack and may newly
    // mark/gray objects and push refs onto the mark stack.
    ProcessMarkStack();
    // Switch to the shared mark stack mode. That is, revoke and process thread-local mark stacks
    // for the last time before transitioning to the shared mark stack mode, which would process
    // new refs that may have been concurrently pushed onto the mark stack during the
    // ProcessMarkStack() call above. At the same time, disable weak ref accesses using a
    // per-thread flag. It's important to do these together in a single checkpoint so that we can
    // ensure that mutators won't newly gray objects and push new refs onto the mark stack due to
    // weak ref accesses, and that mutators safely transition to the shared mark stack mode
    // (without leaving unprocessed refs on the thread-local mark stacks), without a race. This is
    // why we use a thread-local weak ref access flag, Thread::tls32_.weak_ref_access_enabled_,
    // instead of the global ones.
    SwitchToSharedMarkStackMode();
    CHECK(!self->GetWeakRefAccessEnabled());
    // Now that weak refs accesses are disabled, once we exhaust the shared mark stack again here
    // (which may be non-empty if there were refs found on thread-local mark stacks during the
    // above SwitchToSharedMarkStackMode() call), we won't have new refs to process, that is,
    // mutators (via read barriers) have no way to produce any more refs to process. Marking
    // converges once before we process weak refs below.
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Switch to the GC exclusive mark stack mode so that we can process the mark stack without a
    // lock from this point on.
    SwitchToGcExclusiveMarkStackMode();
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "ProcessReferences";
    }
    // Process weak references. This may produce new refs to process and have them processed via
    // ProcessMarkStack (in the GC exclusive mark stack mode).
    ProcessReferences(self);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks";
    }
    SweepSystemWeaks(self);
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks done";
    }
    // Process the mark stack here one last time because the above SweepSystemWeaks() call may
    // have marked some objects (strings) alive, as hash_set::Erase() can call the hash function
    // for arbitrary elements in the weak intern table in InternTable::Table::SweepWeaks().
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Re-enable weak ref accesses.
    ReenableWeakRefAccess(self);
    // Free data for class loaders that we unloaded.
    Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
    // Marking is done. Disable marking.
    DisableMarking();
    if (kUseBakerReadBarrier) {
      ProcessFalseGrayStack();
    }
    CheckEmptyMarkStack();
  }

  CHECK(weak_ref_access_enabled_);
  if (kVerboseMode) {
    LOG(INFO) << "GC end of MarkingPhase";
  }
}

void ConcurrentCopying::ReenableWeakRefAccess(Thread* self) {
  if (kVerboseMode) {
    LOG(INFO) << "ReenableWeakRefAccess";
  }
  weak_ref_access_enabled_.StoreRelaxed(true);  // This is for new threads.
  QuasiAtomic::ThreadFenceForConstructor();
  // Iterate all threads (don't need to or can't use a checkpoint) and re-enable weak ref access.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      thread->SetWeakRefAccessEnabled(true);
    }
  }
  // Unblock blocking threads.
  GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
  Runtime::Current()->BroadcastForNewSystemWeaks();
}

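// Checkpoint used by DisableMarking() to clear each thread's local is_gc_marking flag.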
class ConcurrentCopying::DisableMarkingCheckpoint : public Closure {
 public:
  explicit DisableMarkingCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // Disable the thread-local is_gc_marking flag.
    // Note a thread that has just started right before this checkpoint may already have this flag
    // set to false, which is OK.
    thread->SetIsGcMarking(false);
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

void ConcurrentCopying::IssueDisableMarkingCheckpoint() {
  Thread* self = Thread::Current();
  DisableMarkingCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions are
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

void ConcurrentCopying::DisableMarking() {
  // Change the global is_marking flag to false. Do a fence before doing a checkpoint to update the
  // thread-local flags so that a new thread starting up will get the correct is_marking flag.
  is_marking_ = false;
  QuasiAtomic::ThreadFenceForConstructor();
  // Use a checkpoint to turn off the thread-local is_gc_marking flags and to ensure no threads are
  // still in the middle of a read barrier which may have a from-space ref cached in a local
  // variable.
  IssueDisableMarkingCheckpoint();
  if (kUseTableLookupReadBarrier) {
    heap_->rb_table_->ClearAll();
    DCHECK(heap_->rb_table_->IsAllCleared());
  }
  is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(1);
  mark_stack_mode_.StoreSequentiallyConsistent(kMarkStackModeOff);
}

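// Record an object that was grayed but does not need to remain gray (a "false gray" object);
// ProcessFalseGrayStack() whitens these once marking is done.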
void ConcurrentCopying::PushOntoFalseGrayStack(mirror::Object* ref) {
  CHECK(kUseBakerReadBarrier);
  DCHECK(ref != nullptr);
  MutexLock mu(Thread::Current(), mark_stack_lock_);
  false_gray_stack_.push_back(ref);
}

void ConcurrentCopying::ProcessFalseGrayStack() {
  CHECK(kUseBakerReadBarrier);
  // Change the objects on the false gray stack from gray to white.
  MutexLock mu(Thread::Current(), mark_stack_lock_);
  for (mirror::Object* obj : false_gray_stack_) {
    DCHECK(IsMarked(obj));
    // The object could be white here if a thread got preempted after a success at the
    // AtomicSetReadBarrierPointer in Mark(), the GC started marking through it (but had not
    // finished, so it was still gray), and the thread then ran to register it onto the false
    // gray stack.
    if (obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
      bool success = obj->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                      ReadBarrier::WhitePtr());
      DCHECK(success);
    }
  }
  false_gray_stack_.clear();
}


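// Run an empty checkpoint on all threads and wait for every thread to pass the barrier.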
void ConcurrentCopying::IssueEmptyCheckpoint() {
  Thread* self = Thread::Current();
  EmptyCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions are
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

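// Double the capacity of the GC mark stack, preserving its current contents.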
void ConcurrentCopying::ExpandGcMarkStack() {
  DCHECK(gc_mark_stack_->IsFull());
  const size_t new_size = gc_mark_stack_->Capacity() * 2;
  std::vector<StackReference<mirror::Object>> temp(gc_mark_stack_->Begin(),
                                                   gc_mark_stack_->End());
  gc_mark_stack_->Resize(new_size);
  for (auto& ref : temp) {
    gc_mark_stack_->PushBack(ref.AsMirrorPtr());
  }
  DCHECK(!gc_mark_stack_->IsFull());
}

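// Push a reference onto the mark stack that matches the current mark stack mode: the GC thread's
// stack or a thread-local stack in thread-local mode, the shared stack (with a lock) in shared
// mode, or the GC stack without a lock in GC-exclusive mode.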
void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
  CHECK_EQ(is_mark_stack_push_disallowed_.LoadRelaxed(), 0)
      << " " << to_ref << " " << PrettyTypeOf(to_ref);
  Thread* self = Thread::Current();  // TODO: pass self as an argument from call sites?
  CHECK(thread_running_gc_ != nullptr);
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (LIKELY(mark_stack_mode == kMarkStackModeThreadLocal)) {
    if (LIKELY(self == thread_running_gc_)) {
      // If GC-running thread, use the GC mark stack instead of a thread-local mark stack.
      CHECK(self->GetThreadLocalMarkStack() == nullptr);
      if (UNLIKELY(gc_mark_stack_->IsFull())) {
        ExpandGcMarkStack();
      }
      gc_mark_stack_->PushBack(to_ref);
    } else {
      // Otherwise, use a thread-local mark stack.
      accounting::AtomicStack<mirror::Object>* tl_mark_stack = self->GetThreadLocalMarkStack();
      if (UNLIKELY(tl_mark_stack == nullptr || tl_mark_stack->IsFull())) {
        MutexLock mu(self, mark_stack_lock_);
        // Get a new thread local mark stack.
        accounting::AtomicStack<mirror::Object>* new_tl_mark_stack;
        if (!pooled_mark_stacks_.empty()) {
          // Use a pooled mark stack.
          new_tl_mark_stack = pooled_mark_stacks_.back();
          pooled_mark_stacks_.pop_back();
        } else {
          // None pooled. Create a new one.
          new_tl_mark_stack =
              accounting::AtomicStack<mirror::Object>::Create(
                  "thread local mark stack", 4 * KB, 4 * KB);
        }
        DCHECK(new_tl_mark_stack != nullptr);
        DCHECK(new_tl_mark_stack->IsEmpty());
        new_tl_mark_stack->PushBack(to_ref);
        self->SetThreadLocalMarkStack(new_tl_mark_stack);
        if (tl_mark_stack != nullptr) {
          // Store the old full stack into a vector.
          revoked_mark_stacks_.push_back(tl_mark_stack);
        }
      } else {
        tl_mark_stack->PushBack(to_ref);
      }
    }
  } else if (mark_stack_mode == kMarkStackModeShared) {
    // Access the shared GC mark stack with a lock.
    MutexLock mu(self, mark_stack_lock_);
    if (UNLIKELY(gc_mark_stack_->IsFull())) {
      ExpandGcMarkStack();
    }
    gc_mark_stack_->PushBack(to_ref);
  } else {
    CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
             static_cast<uint32_t>(kMarkStackModeGcExclusive))
        << "ref=" << to_ref
        << " self->gc_marking=" << self->GetIsGcMarking()
        << " cc->is_marking=" << is_marking_;
    CHECK(self == thread_running_gc_)
        << "Only GC-running thread should access the mark stack "
        << "in the GC exclusive mark stack mode";
    // Access the GC mark stack without a lock.
    if (UNLIKELY(gc_mark_stack_->IsFull())) {
      ExpandGcMarkStack();
    }
    gc_mark_stack_->PushBack(to_ref);
  }
}

accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() {
  return heap_->allocation_stack_.get();
}

accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
  return heap_->live_stack_.get();
}

// The following visitors are used to verify that there are no references to the from-space left
// after marking.
class ConcurrentCopying::VerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
 public:
  explicit VerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    if (kUseBakerReadBarrier) {
      CHECK(ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr())
          << "Ref " << ref << " " << PrettyTypeOf(ref)
          << " has non-white rb_ptr " << ref->GetReadBarrierPointer();
    }
  }

  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    DCHECK(root != nullptr);
    operator()(root);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopying::VerifyNoFromSpaceRefsFieldVisitor {
 public:
  explicit VerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    VerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    this->operator()(ref, mirror::Reference::ReferentOffset(), false);
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    VerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(root->AsMirrorPtr());
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopying::VerifyNoFromSpaceRefsObjectVisitor {
 public:
  explicit VerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void *arg)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    VerifyNoFromSpaceRefsFieldVisitor visitor(collector);
    obj->VisitReferences(visitor, visitor);
    if (kUseBakerReadBarrier) {
      CHECK(obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr())
          << "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer();
    }
  }

 private:
  ConcurrentCopying* const collector_;
};

// Verify that there are no from-space references left after the marking phase.
void ConcurrentCopying::VerifyNoFromSpaceReferences() {
  Thread* self = Thread::Current();
  DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
  // Verify that all threads have their is_gc_marking flags set to false.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      CHECK(!thread->GetIsGcMarking());
    }
  }
  VerifyNoFromSpaceRefsObjectVisitor visitor(this);
  // Roots.
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    VerifyNoFromSpaceRefsVisitor ref_visitor(this);
    Runtime::Current()->VisitRoots(&ref_visitor);
  }
  // The to-space.
  region_space_->WalkToSpace(VerifyNoFromSpaceRefsObjectVisitor::ObjectCallback, this);
  // Non-moving spaces.
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->GetMarkBitmap()->Visit(visitor);
  }
  // The alloc stack.
  {
    VerifyNoFromSpaceRefsVisitor ref_visitor(this);
    for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
         it < end; ++it) {
      mirror::Object* const obj = it->AsMirrorPtr();
      if (obj != nullptr && obj->GetClass() != nullptr) {
        // TODO: need to call this only if obj is alive?
        ref_visitor(obj);
        visitor(obj);
      }
    }
  }
  // TODO: LOS. But only refs in LOS are classes.
}

// The following visitors are used to assert the to-space invariant.
class ConcurrentCopying::AssertToSpaceInvariantRefsVisitor {
 public:
  explicit AssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopying::AssertToSpaceInvariantFieldVisitor {
 public:
  explicit AssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    AssertToSpaceInvariantRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* ref ATTRIBUTE_UNUSED) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    AssertToSpaceInvariantRefsVisitor visitor(collector_);
    visitor(root->AsMirrorPtr());
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopying::AssertToSpaceInvariantObjectVisitor {
 public:
  explicit AssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void *arg)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
    AssertToSpaceInvariantFieldVisitor visitor(collector);
    obj->VisitReferences(visitor, visitor);
  }

 private:
  ConcurrentCopying* const collector_;
};

Mathieu Chartiera07f5592016-06-16 11:44:28 -07001107class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001108 public:
Roland Levillain3887c462015-08-12 18:15:42 +01001109 RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
1110 bool disable_weak_ref_access)
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001111 : concurrent_copying_(concurrent_copying),
1112 disable_weak_ref_access_(disable_weak_ref_access) {
1113 }
1114
1115 virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
1116 // Note: self is not necessarily equal to thread since thread may be suspended.
1117 Thread* self = Thread::Current();
1118 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
1119 << thread->GetState() << " thread " << thread << " self " << self;
1120 // Revoke thread local mark stacks.
1121 accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
1122 if (tl_mark_stack != nullptr) {
1123 MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
1124 concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
1125 thread->SetThreadLocalMarkStack(nullptr);
1126 }
1127 // Disable weak ref access.
1128 if (disable_weak_ref_access_) {
1129 thread->SetWeakRefAccessEnabled(false);
1130 }
1131 // If thread is a running mutator, then act on behalf of the garbage collector.
1132 // See the code in ThreadList::RunCheckpoint.
Mathieu Chartier10d25082015-10-28 18:36:09 -07001133 concurrent_copying_->GetBarrier().Pass(self);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001134 }
1135
1136 private:
1137 ConcurrentCopying* const concurrent_copying_;
1138 const bool disable_weak_ref_access_;
1139};
1140
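// Runs RevokeThreadLocalMarkStackCheckpoint on all threads and waits on gc_barrier_ until every
// checkpoint has run, temporarily dropping the shared mutator lock while waiting.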
1141void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access) {
1142 Thread* self = Thread::Current();
1143 RevokeThreadLocalMarkStackCheckpoint check_point(this, disable_weak_ref_access);
1144 ThreadList* thread_list = Runtime::Current()->GetThreadList();
1145 gc_barrier_->Init(self, 0);
1146 size_t barrier_count = thread_list->RunCheckpoint(&check_point);
1147 // If there are no threads to wait for, which implies that all the checkpoint functions have
1148 // finished, then there is no need to release the mutator lock.
1149 if (barrier_count == 0) {
1150 return;
1151 }
1152 Locks::mutator_lock_->SharedUnlock(self);
1153 {
1154 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1155 gc_barrier_->Increment(self, barrier_count);
1156 }
1157 Locks::mutator_lock_->SharedLock(self);
1158}
1159
1160void ConcurrentCopying::RevokeThreadLocalMarkStack(Thread* thread) {
1161 Thread* self = Thread::Current();
1162 CHECK_EQ(self, thread);
1163 accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
1164 if (tl_mark_stack != nullptr) {
1165 CHECK(is_marking_);
1166 MutexLock mu(self, mark_stack_lock_);
1167 revoked_mark_stacks_.push_back(tl_mark_stack);
1168 thread->SetThreadLocalMarkStack(nullptr);
1169 }
1170}
1171
1172void ConcurrentCopying::ProcessMarkStack() {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001173 if (kVerboseMode) {
1174 LOG(INFO) << "ProcessMarkStack. ";
1175 }
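  // Terminate only after two consecutive passes find the stacks empty; in the thread-local and
  // shared modes mutators may still push references, so a single empty pass could miss work
  // pushed concurrently.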
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001176 bool empty_prev = false;
1177 while (true) {
1178 bool empty = ProcessMarkStackOnce();
1179 if (empty_prev && empty) {
1180 // Saw empty mark stack for a second time, done.
1181 break;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001182 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001183 empty_prev = empty;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001184 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001185}
1186
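// Drains the mark stack(s) once, according to the current mark stack mode:
//  - kMarkStackModeThreadLocal: revoke and process the per-thread stacks, then the GC stack.
//  - kMarkStackModeShared: mutators may still push, so copy the refs out under mark_stack_lock_
//    and process them outside the lock.
//  - kMarkStackModeGcExclusive: only the GC thread pushes, so no locking is needed.
// Returns true if nothing was processed, i.e. the stacks were already empty.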
1187bool ConcurrentCopying::ProcessMarkStackOnce() {
1188 Thread* self = Thread::Current();
1189 CHECK(thread_running_gc_ != nullptr);
1190 CHECK(self == thread_running_gc_);
1191 CHECK(self->GetThreadLocalMarkStack() == nullptr);
1192 size_t count = 0;
1193 MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
1194 if (mark_stack_mode == kMarkStackModeThreadLocal) {
1195 // Process the thread-local mark stacks and the GC mark stack.
1196 count += ProcessThreadLocalMarkStacks(false);
1197 while (!gc_mark_stack_->IsEmpty()) {
1198 mirror::Object* to_ref = gc_mark_stack_->PopBack();
1199 ProcessMarkStackRef(to_ref);
1200 ++count;
1201 }
1202 gc_mark_stack_->Reset();
1203 } else if (mark_stack_mode == kMarkStackModeShared) {
1204 // Process the shared GC mark stack with a lock.
1205 {
1206 MutexLock mu(self, mark_stack_lock_);
1207 CHECK(revoked_mark_stacks_.empty());
1208 }
1209 while (true) {
1210 std::vector<mirror::Object*> refs;
1211 {
1212 // Copy refs with lock. Note the number of refs should be small.
1213 MutexLock mu(self, mark_stack_lock_);
1214 if (gc_mark_stack_->IsEmpty()) {
1215 break;
1216 }
1217 for (StackReference<mirror::Object>* p = gc_mark_stack_->Begin();
1218 p != gc_mark_stack_->End(); ++p) {
1219 refs.push_back(p->AsMirrorPtr());
1220 }
1221 gc_mark_stack_->Reset();
1222 }
1223 for (mirror::Object* ref : refs) {
1224 ProcessMarkStackRef(ref);
1225 ++count;
1226 }
1227 }
1228 } else {
1229 CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
1230 static_cast<uint32_t>(kMarkStackModeGcExclusive));
1231 {
1232 MutexLock mu(self, mark_stack_lock_);
1233 CHECK(revoked_mark_stacks_.empty());
1234 }
1235 // Process the GC mark stack in the exclusive mode. No need to take the lock.
1236 while (!gc_mark_stack_->IsEmpty()) {
1237 mirror::Object* to_ref = gc_mark_stack_->PopBack();
1238 ProcessMarkStackRef(to_ref);
1239 ++count;
1240 }
1241 gc_mark_stack_->Reset();
1242 }
1243
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001244 // Return true if the stack was empty.
1245 return count == 0;
1246}
1247
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001248size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access) {
1249 // Run a checkpoint to collect all thread local mark stacks and iterate over them all.
1250 RevokeThreadLocalMarkStacks(disable_weak_ref_access);
1251 size_t count = 0;
1252 std::vector<accounting::AtomicStack<mirror::Object>*> mark_stacks;
1253 {
1254 MutexLock mu(Thread::Current(), mark_stack_lock_);
1255 // Make a copy of the mark stack vector.
1256 mark_stacks = revoked_mark_stacks_;
1257 revoked_mark_stacks_.clear();
1258 }
1259 for (accounting::AtomicStack<mirror::Object>* mark_stack : mark_stacks) {
1260 for (StackReference<mirror::Object>* p = mark_stack->Begin(); p != mark_stack->End(); ++p) {
1261 mirror::Object* to_ref = p->AsMirrorPtr();
1262 ProcessMarkStackRef(to_ref);
1263 ++count;
1264 }
1265 {
1266 MutexLock mu(Thread::Current(), mark_stack_lock_);
1267 if (pooled_mark_stacks_.size() >= kMarkStackPoolSize) {
1268 // The pool already has enough mark stacks; delete this one.
1269 delete mark_stack;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001270 } else {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001271 // Otherwise, put it into the pool for later reuse.
1272 mark_stack->Reset();
1273 pooled_mark_stacks_.push_back(mark_stack);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001274 }
1275 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001276 }
1277 return count;
1278}
1279
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001280inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001281 DCHECK(!region_space_->IsInFromSpace(to_ref));
1282 if (kUseBakerReadBarrier) {
1283 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
1284 << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
1285 << " is_marked=" << IsMarked(to_ref);
1286 }
1287 // Scan ref fields.
1288 Scan(to_ref);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001289 if (kUseBakerReadBarrier) {
1290 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
1291 << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
1292 << " is_marked=" << IsMarked(to_ref);
1293 }
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001294#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
1295 if (UNLIKELY((to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
1296 to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr &&
1297 !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())))) {
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001298 // Leave this reference gray in the queue so that GetReferent() will trigger a read barrier. We
1299 // will change it to white later in ReferenceQueue::DequeuePendingReference().
Richard Uhlere3627402016-02-02 13:36:55 -08001300 DCHECK(to_ref->AsReference()->GetPendingNext() != nullptr) << "Left unenqueued ref gray " << to_ref;
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001301 } else {
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001302 // We may occasionally leave a reference white in the queue if its referent happens to be
1303 // concurrently marked after the Scan() call above has enqueued the Reference, in which case the
1304 // above IsInToSpace() evaluates to true and we change the color from gray to white here in this
1305 // else block.
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001306 if (kUseBakerReadBarrier) {
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001307 bool success = to_ref->AtomicSetReadBarrierPointer</*kCasRelease*/true>(
1308 ReadBarrier::GrayPtr(),
1309 ReadBarrier::WhitePtr());
1310 DCHECK(success) << "Must succeed as we won the race.";
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001311 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001312 }
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001313#else
1314 DCHECK(!kUseBakerReadBarrier);
1315#endif
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001316
1317 if (region_space_->IsInUnevacFromSpace(to_ref)) {
1318 // Add to the live bytes of the unevac from-space region. Note this code is always run by the
1319 // GC-running thread (no synchronization required).
1320 DCHECK(region_space_bitmap_->Test(to_ref));
1321 // Disable the read barrier in SizeOf for performance, which is safe.
1322 size_t obj_size = to_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
1323 size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
1324 region_space_->AddLiveBytes(to_ref, alloc_size);
1325 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001326 if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001327 AssertToSpaceInvariantObjectVisitor visitor(this);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001328 visitor(to_ref);
1329 }
1330}
1331
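// Called by the GC thread to switch from thread-local to shared mark stack mode. After this
// point mutators push onto the shared GC mark stack under mark_stack_lock_, and weak reference
// access stays disabled until the collector re-enables it later.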
1332void ConcurrentCopying::SwitchToSharedMarkStackMode() {
1333 Thread* self = Thread::Current();
1334 CHECK(thread_running_gc_ != nullptr);
1335 CHECK_EQ(self, thread_running_gc_);
1336 CHECK(self->GetThreadLocalMarkStack() == nullptr);
1337 MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
1338 CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
1339 static_cast<uint32_t>(kMarkStackModeThreadLocal));
1340 mark_stack_mode_.StoreRelaxed(kMarkStackModeShared);
1341 CHECK(weak_ref_access_enabled_.LoadRelaxed());
1342 weak_ref_access_enabled_.StoreRelaxed(false);
1343 QuasiAtomic::ThreadFenceForConstructor();
1344 // Process the thread local mark stacks one last time after switching to the shared mark stack
1345 // mode and disabling weak ref accesses.
1346 ProcessThreadLocalMarkStacks(true);
1347 if (kVerboseMode) {
1348 LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access";
1349 }
1350}
1351
1352void ConcurrentCopying::SwitchToGcExclusiveMarkStackMode() {
1353 Thread* self = Thread::Current();
1354 CHECK(thread_running_gc_ != nullptr);
1355 CHECK_EQ(self, thread_running_gc_);
1356 CHECK(self->GetThreadLocalMarkStack() == nullptr);
1357 MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
1358 CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
1359 static_cast<uint32_t>(kMarkStackModeShared));
1360 mark_stack_mode_.StoreRelaxed(kMarkStackModeGcExclusive);
1361 QuasiAtomic::ThreadFenceForConstructor();
1362 if (kVerboseMode) {
1363 LOG(INFO) << "Switched to GC exclusive mark stack mode";
1364 }
1365}
1366
1367void ConcurrentCopying::CheckEmptyMarkStack() {
1368 Thread* self = Thread::Current();
1369 CHECK(thread_running_gc_ != nullptr);
1370 CHECK_EQ(self, thread_running_gc_);
1371 CHECK(self->GetThreadLocalMarkStack() == nullptr);
1372 MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
1373 if (mark_stack_mode == kMarkStackModeThreadLocal) {
1374 // Thread-local mark stack mode.
1375 RevokeThreadLocalMarkStacks(false);
1376 MutexLock mu(Thread::Current(), mark_stack_lock_);
1377 if (!revoked_mark_stacks_.empty()) {
1378 for (accounting::AtomicStack<mirror::Object>* mark_stack : revoked_mark_stacks_) {
1379 while (!mark_stack->IsEmpty()) {
1380 mirror::Object* obj = mark_stack->PopBack();
1381 if (kUseBakerReadBarrier) {
1382 mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
1383 LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj) << " rb_ptr=" << rb_ptr
1384 << " is_marked=" << IsMarked(obj);
1385 } else {
1386 LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj)
1387 << " is_marked=" << IsMarked(obj);
1388 }
1389 }
1390 }
1391 LOG(FATAL) << "mark stack is not empty";
1392 }
1393 } else {
1394 // Shared, GC-exclusive, or off.
1395 MutexLock mu(Thread::Current(), mark_stack_lock_);
1396 CHECK(gc_mark_stack_->IsEmpty());
1397 CHECK(revoked_mark_stacks_.empty());
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001398 }
1399}
1400
1401void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
1402 TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
1403 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
Mathieu Chartier97509952015-07-13 14:35:43 -07001404 Runtime::Current()->SweepSystemWeaks(this);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001405}
1406
1407void ConcurrentCopying::Sweep(bool swap_bitmaps) {
1408 {
1409 TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
1410 accounting::ObjectStack* live_stack = heap_->GetLiveStack();
1411 if (kEnableFromSpaceAccountingCheck) {
1412 CHECK_GE(live_stack_freeze_size_, live_stack->Size());
1413 }
1414 heap_->MarkAllocStackAsLive(live_stack);
1415 live_stack->Reset();
1416 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001417 CheckEmptyMarkStack();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001418 TimingLogger::ScopedTiming split("Sweep", GetTimings());
1419 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
1420 if (space->IsContinuousMemMapAllocSpace()) {
1421 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
Mathieu Chartier763a31e2015-11-16 16:05:55 -08001422 if (space == region_space_ || immune_spaces_.ContainsSpace(space)) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001423 continue;
1424 }
1425 TimingLogger::ScopedTiming split2(
1426 alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
1427 RecordFree(alloc_space->Sweep(swap_bitmaps));
1428 }
1429 }
1430 SweepLargeObjects(swap_bitmaps);
1431}
1432
1433void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) {
1434 TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
1435 RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
1436}
1437
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001438void ConcurrentCopying::ReclaimPhase() {
1439 TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings());
1440 if (kVerboseMode) {
1441 LOG(INFO) << "GC ReclaimPhase";
1442 }
1443 Thread* self = Thread::Current();
1444
1445 {
1446 // Double-check that the mark stack is empty.
1447 // Note: need to set this after VerifyNoFromSpaceRef().
1448 is_asserting_to_space_invariant_ = false;
1449 QuasiAtomic::ThreadFenceForConstructor();
1450 if (kVerboseMode) {
1451 LOG(INFO) << "Issue an empty check point. ";
1452 }
1453 IssueEmptyCheckpoint();
1454 // Disable the check.
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001455 is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(0);
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001456 if (kUseBakerReadBarrier) {
1457 updated_all_immune_objects_.StoreSequentiallyConsistent(false);
1458 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001459 CheckEmptyMarkStack();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001460 }
1461
1462 {
1463 // Record freed objects.
1464 TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
1465 // Don't include thread-locals that are in the to-space.
1466 uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
1467 uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
1468 uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
1469 uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
1470 uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
1471 uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
1472 if (kEnableFromSpaceAccountingCheck) {
1473 CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
1474 CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
1475 }
1476 CHECK_LE(to_objects, from_objects);
1477 CHECK_LE(to_bytes, from_bytes);
1478 int64_t freed_bytes = from_bytes - to_bytes;
1479 int64_t freed_objects = from_objects - to_objects;
1480 if (kVerboseMode) {
1481 LOG(INFO) << "RecordFree:"
1482 << " from_bytes=" << from_bytes << " from_objects=" << from_objects
1483 << " unevac_from_bytes=" << unevac_from_bytes << " unevac_from_objects=" << unevac_from_objects
1484 << " to_bytes=" << to_bytes << " to_objects=" << to_objects
1485 << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects
1486 << " from_space size=" << region_space_->FromSpaceSize()
1487 << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
1488 << " to_space size=" << region_space_->ToSpaceSize();
1489 LOG(INFO) << "(before) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
1490 }
1491 RecordFree(ObjectBytePair(freed_objects, freed_bytes));
1492 if (kVerboseMode) {
1493 LOG(INFO) << "(after) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
1494 }
1495 }
1496
1497 {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001498 TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
1499 region_space_->ClearFromSpace();
1500 }
1501
1502 {
1503 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001504 Sweep(false);
1505 SwapBitmaps();
1506 heap_->UnBindBitmaps();
1507
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001508 // Delete the region bitmap.
1509 DCHECK(region_space_bitmap_ != nullptr);
1510 delete region_space_bitmap_;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001511 region_space_bitmap_ = nullptr;
1512 }
1513
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001514 CheckEmptyMarkStack();
1515
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001516 if (kVerboseMode) {
1517 LOG(INFO) << "GC end of ReclaimPhase";
1518 }
1519}
1520
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001521// Assert the to-space invariant.
1522void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
1523 mirror::Object* ref) {
1524 CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
1525 if (is_asserting_to_space_invariant_) {
1526 if (region_space_->IsInToSpace(ref)) {
1527 // OK.
1528 return;
1529 } else if (region_space_->IsInUnevacFromSpace(ref)) {
1530 CHECK(region_space_bitmap_->Test(ref)) << ref;
1531 } else if (region_space_->IsInFromSpace(ref)) {
1532 // Not OK. Do extra logging.
1533 if (obj != nullptr) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001534 LogFromSpaceRefHolder(obj, offset);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001535 }
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001536 ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001537 CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
1538 } else {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001539 AssertToSpaceInvariantInNonMovingSpace(obj, ref);
1540 }
1541 }
1542}
1543
1544class RootPrinter {
1545 public:
1546 RootPrinter() { }
1547
1548 template <class MirrorType>
1549 ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
Mathieu Chartier90443472015-07-16 20:32:27 -07001550 SHARED_REQUIRES(Locks::mutator_lock_) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001551 if (!root->IsNull()) {
1552 VisitRoot(root);
1553 }
1554 }
1555
1556 template <class MirrorType>
1557 void VisitRoot(mirror::Object** root)
Mathieu Chartier90443472015-07-16 20:32:27 -07001558 SHARED_REQUIRES(Locks::mutator_lock_) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001559 LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << *root;
1560 }
1561
1562 template <class MirrorType>
1563 void VisitRoot(mirror::CompressedReference<MirrorType>* root)
Mathieu Chartier90443472015-07-16 20:32:27 -07001564 SHARED_REQUIRES(Locks::mutator_lock_) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001565 LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << root->AsMirrorPtr();
1566 }
1567};
1568
1569void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source,
1570 mirror::Object* ref) {
1571 CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
1572 if (is_asserting_to_space_invariant_) {
1573 if (region_space_->IsInToSpace(ref)) {
1574 // OK.
1575 return;
1576 } else if (region_space_->IsInUnevacFromSpace(ref)) {
1577 CHECK(region_space_bitmap_->Test(ref)) << ref;
1578 } else if (region_space_->IsInFromSpace(ref)) {
1579 // Not OK. Do extra logging.
1580 if (gc_root_source == nullptr) {
1581 // No info.
1582 } else if (gc_root_source->HasArtField()) {
1583 ArtField* field = gc_root_source->GetArtField();
1584 LOG(INTERNAL_FATAL) << "gc root in field " << field << " " << PrettyField(field);
1585 RootPrinter root_printer;
1586 field->VisitRoots(root_printer);
1587 } else if (gc_root_source->HasArtMethod()) {
1588 ArtMethod* method = gc_root_source->GetArtMethod();
1589 LOG(INTERNAL_FATAL) << "gc root in method " << method << " " << PrettyMethod(method);
1590 RootPrinter root_printer;
Mathieu Chartier1147b9b2015-09-14 18:50:08 -07001591 method->VisitRoots(root_printer, sizeof(void*));
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001592 }
1593 ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
1594 region_space_->DumpNonFreeRegions(LOG(INTERNAL_FATAL));
1595 PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
1596 MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
1597 CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
1598 } else {
1599 AssertToSpaceInvariantInNonMovingSpace(nullptr, ref);
1600 }
1601 }
1602}
1603
1604void ConcurrentCopying::LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) {
1605 if (kUseBakerReadBarrier) {
1606 LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj)
1607 << " holder rb_ptr=" << obj->GetReadBarrierPointer();
1608 } else {
1609 LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj);
1610 }
1611 if (region_space_->IsInFromSpace(obj)) {
1612 LOG(INFO) << "holder is in the from-space.";
1613 } else if (region_space_->IsInToSpace(obj)) {
1614 LOG(INFO) << "holder is in the to-space.";
1615 } else if (region_space_->IsInUnevacFromSpace(obj)) {
1616 LOG(INFO) << "holder is in the unevac from-space.";
1617 if (region_space_bitmap_->Test(obj)) {
1618 LOG(INFO) << "holder is marked in the region space bitmap.";
1619 } else {
1620 LOG(INFO) << "holder is not marked in the region space bitmap.";
1621 }
1622 } else {
1623 // In a non-moving space.
Mathieu Chartier763a31e2015-11-16 16:05:55 -08001624 if (immune_spaces_.ContainsObject(obj)) {
1625 LOG(INFO) << "holder is in an immune image or the zygote space.";
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001626 } else {
Mathieu Chartier763a31e2015-11-16 16:05:55 -08001627 LOG(INFO) << "holder is in a non-immune, non-moving (or main) space.";
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001628 accounting::ContinuousSpaceBitmap* mark_bitmap =
1629 heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
1630 accounting::LargeObjectBitmap* los_bitmap =
1631 heap_mark_bitmap_->GetLargeObjectBitmap(obj);
1632 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1633 bool is_los = mark_bitmap == nullptr;
1634 if (!is_los && mark_bitmap->Test(obj)) {
1635 LOG(INFO) << "holder is marked in the mark bit map.";
1636 } else if (is_los && los_bitmap->Test(obj)) {
1637 LOG(INFO) << "holder is marked in the los bit map.";
1638 } else {
1639 // If ref is on the allocation stack, then it is considered
1640 // marked/alive (but not necessarily on the live stack).
1641 if (IsOnAllocStack(obj)) {
1642 LOG(INFO) << "holder is on the alloc stack.";
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001643 } else {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001644 LOG(INFO) << "holder is not marked or on the alloc stack.";
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001645 }
1646 }
1647 }
1648 }
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001649 LOG(INFO) << "offset=" << offset.SizeValue();
1650}
1651
1652void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj,
1653 mirror::Object* ref) {
1654 // In a non-moving space. Check that the ref is marked.
Mathieu Chartier763a31e2015-11-16 16:05:55 -08001655 if (immune_spaces_.ContainsObject(ref)) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001656 if (kUseBakerReadBarrier) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001657 // Immune object may not be gray if called from the GC.
1658 if (Thread::Current() == thread_running_gc_ && !gc_grays_immune_objects_) {
1659 return;
1660 }
1661 bool updated_all_immune_objects = updated_all_immune_objects_.LoadSequentiallyConsistent();
1662 CHECK(updated_all_immune_objects || ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001663 << "Unmarked immune space ref. obj=" << obj << " rb_ptr="
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001664 << (obj != nullptr ? obj->GetReadBarrierPointer() : nullptr)
1665 << " ref=" << ref << " ref rb_ptr=" << ref->GetReadBarrierPointer()
1666 << " updated_all_immune_objects=" << updated_all_immune_objects;
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001667 }
1668 } else {
1669 accounting::ContinuousSpaceBitmap* mark_bitmap =
1670 heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
1671 accounting::LargeObjectBitmap* los_bitmap =
1672 heap_mark_bitmap_->GetLargeObjectBitmap(ref);
1673 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1674 bool is_los = mark_bitmap == nullptr;
1675 if ((!is_los && mark_bitmap->Test(ref)) ||
1676 (is_los && los_bitmap->Test(ref))) {
1677 // OK.
1678 } else {
1679 // If ref is on the allocation stack, then it may not be
1680 // marked live, but considered marked/alive (but not
1681 // necessarily on the live stack).
1682 CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. "
1683 << "obj=" << obj << " ref=" << ref;
1684 }
1685 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001686}
1687
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001688// Used to scan ref fields of an object.
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001689class ConcurrentCopying::RefFieldsVisitor {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001690 public:
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001691 explicit RefFieldsVisitor(ConcurrentCopying* collector)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001692 : collector_(collector) {}
1693
1694 void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
Mathieu Chartier90443472015-07-16 20:32:27 -07001695 const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_)
1696 SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001697 collector_->Process(obj, offset);
1698 }
1699
1700 void operator()(mirror::Class* klass, mirror::Reference* ref) const
Mathieu Chartier90443472015-07-16 20:32:27 -07001701 SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001702 CHECK(klass->IsTypeOfReferenceClass());
1703 collector_->DelayReferenceReferent(klass, ref);
1704 }
1705
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001706 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001707 ALWAYS_INLINE
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001708 SHARED_REQUIRES(Locks::mutator_lock_) {
1709 if (!root->IsNull()) {
1710 VisitRoot(root);
1711 }
1712 }
1713
1714 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001715 ALWAYS_INLINE
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001716 SHARED_REQUIRES(Locks::mutator_lock_) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001717 collector_->MarkRoot</*kGrayImmuneObject*/false>(root);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001718 }
1719
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001720 private:
1721 ConcurrentCopying* const collector_;
1722};
1723
1724// Scan ref fields of an object.
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001725inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07001726 if (kIsDebugBuild) {
1727 // Avoid all read barriers while visiting references, to help performance.
1728 Thread::Current()->ModifyDebugDisallowReadBarrier(1);
1729 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001730 DCHECK(!region_space_->IsInFromSpace(to_ref));
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001731 DCHECK_EQ(Thread::Current(), thread_running_gc_);
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001732 RefFieldsVisitor visitor(this);
Hiroshi Yamauchi5496f692016-02-17 13:29:59 -08001733 // Disable the read barrier for a performance reason.
1734 to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
1735 visitor, visitor);
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07001736 if (kIsDebugBuild) {
1737 Thread::Current()->ModifyDebugDisallowReadBarrier(-1);
1738 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001739}
1740
1741// Process a field.
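// Maps the field's referent to its to-space copy and, if it moved, publishes the new value with
// a weak CAS. If a mutator updates the field concurrently, the CAS is simply abandoned and the
// mutator's value is kept.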
1742inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001743 DCHECK_EQ(Thread::Current(), thread_running_gc_);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001744 mirror::Object* ref = obj->GetFieldObject<
1745 mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001746 mirror::Object* to_ref = Mark</*kGrayImmuneObject*/false>(ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001747 if (to_ref == ref) {
1748 return;
1749 }
1750 // This may fail if the mutator writes to the field at the same time. But it's ok.
1751 mirror::Object* expected_ref = ref;
1752 mirror::Object* new_ref = to_ref;
1753 do {
1754 if (expected_ref !=
1755 obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) {
1756 // It was updated by the mutator.
1757 break;
1758 }
Hiroshi Yamauchifed3e2f2015-10-20 11:11:56 -07001759 } while (!obj->CasFieldWeakRelaxedObjectWithoutWriteBarrier<
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001760 false, false, kVerifyNone>(offset, expected_ref, new_ref));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001761}
1762
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001763// Process some roots.
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001764inline void ConcurrentCopying::VisitRoots(
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001765 mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
1766 for (size_t i = 0; i < count; ++i) {
1767 mirror::Object** root = roots[i];
1768 mirror::Object* ref = *root;
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001769 mirror::Object* to_ref = Mark(ref);
1770 if (to_ref == ref) {
Mathieu Chartier4809d0a2015-04-07 10:39:04 -07001771 continue;
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001772 }
1773 Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
1774 mirror::Object* expected_ref = ref;
1775 mirror::Object* new_ref = to_ref;
1776 do {
1777 if (expected_ref != addr->LoadRelaxed()) {
1778 // It was updated by the mutator.
1779 break;
1780 }
Hiroshi Yamauchifed3e2f2015-10-20 11:11:56 -07001781 } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001782 }
1783}
1784
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001785template<bool kGrayImmuneObject>
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001786inline void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) {
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001787 DCHECK(!root->IsNull());
1788 mirror::Object* const ref = root->AsMirrorPtr();
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001789 mirror::Object* to_ref = Mark<kGrayImmuneObject>(ref);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001790 if (to_ref != ref) {
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001791 auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
1792 auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
1793 auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001794 // If the cas fails, then it was updated by the mutator.
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001795 do {
1796 if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
1797 // It was updated by the mutator.
1798 break;
1799 }
Hiroshi Yamauchifed3e2f2015-10-20 11:11:56 -07001800 } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001801 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001802}
1803
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001804inline void ConcurrentCopying::VisitRoots(
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001805 mirror::CompressedReference<mirror::Object>** roots, size_t count,
1806 const RootInfo& info ATTRIBUTE_UNUSED) {
1807 for (size_t i = 0; i < count; ++i) {
1808 mirror::CompressedReference<mirror::Object>* const root = roots[i];
1809 if (!root->IsNull()) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001810 // kGrayImmuneObject is true because this is used for the thread flip.
1811 MarkRoot</*kGrayImmuneObject*/true>(root);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001812 }
1813 }
1814}
1815
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001816// Temporarily sets gc_grays_immune_objects_ to true within a scope if the current thread is the GC thread.
1817class ConcurrentCopying::ScopedGcGraysImmuneObjects {
1818 public:
1819 explicit ScopedGcGraysImmuneObjects(ConcurrentCopying* collector)
1820 : collector_(collector), enabled_(false) {
1821 if (kUseBakerReadBarrier &&
1822 collector_->thread_running_gc_ == Thread::Current() &&
1823 !collector_->gc_grays_immune_objects_) {
1824 collector_->gc_grays_immune_objects_ = true;
1825 enabled_ = true;
1826 }
1827 }
1828
1829 ~ScopedGcGraysImmuneObjects() {
1830 if (kUseBakerReadBarrier &&
1831 collector_->thread_running_gc_ == Thread::Current() &&
1832 enabled_) {
1833 DCHECK(collector_->gc_grays_immune_objects_);
1834 collector_->gc_grays_immune_objects_ = false;
1835 }
1836 }
1837
1838 private:
1839 ConcurrentCopying* const collector_;
1840 bool enabled_;
1841};
1842
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001843// Fill the given memory block with a dummy object. Used to fill in a
1844 // copy of an object that was lost in a race.
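// For illustration (hypothetical sizes, not the exact header layout): with byte_size = 64,
// component_size = 4 and data_offset = 16, the block becomes an int array of
// length = (64 - 16) / 4 = 12, so SizeOf() == 16 + 12 * 4 == byte_size.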
1845void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001846 // GC doesn't gray immune objects while scanning immune objects. But we need to trigger the read
1847 // barriers here because we need the updated reference to the int array class, etc. Temporarily set
1848 // gc_grays_immune_objects_ to true so that we won't cause a DCHECK failure in MarkImmuneSpace().
1849 ScopedGcGraysImmuneObjects scoped_gc_gray_immune_objects(this);
Roland Levillain14d90572015-07-16 10:52:26 +01001850 CHECK_ALIGNED(byte_size, kObjectAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001851 memset(dummy_obj, 0, byte_size);
1852 mirror::Class* int_array_class = mirror::IntArray::GetArrayClass();
1853 CHECK(int_array_class != nullptr);
1854 AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07001855 size_t component_size = int_array_class->GetComponentSize<kWithoutReadBarrier>();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001856 CHECK_EQ(component_size, sizeof(int32_t));
1857 size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
1858 if (data_offset > byte_size) {
1859 // An int array is too big. Use java.lang.Object.
1860 mirror::Class* java_lang_Object = WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object);
1861 AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object);
1862 CHECK_EQ(byte_size, java_lang_Object->GetObjectSize());
1863 dummy_obj->SetClass(java_lang_Object);
1864 CHECK_EQ(byte_size, dummy_obj->SizeOf());
1865 } else {
1866 // Use an int array.
1867 dummy_obj->SetClass(int_array_class);
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07001868 CHECK((dummy_obj->IsArrayInstance<kVerifyNone, kWithoutReadBarrier>()));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001869 int32_t length = (byte_size - data_offset) / component_size;
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07001870 mirror::Array* dummy_arr = dummy_obj->AsArray<kVerifyNone, kWithoutReadBarrier>();
1871 dummy_arr->SetLength(length);
1872 CHECK_EQ(dummy_arr->GetLength(), length)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001873 << "byte_size=" << byte_size << " length=" << length
1874 << " component_size=" << component_size << " data_offset=" << data_offset;
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07001875 CHECK_EQ(byte_size, (dummy_obj->SizeOf<kVerifyNone, kWithoutReadBarrier>()))
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001876 << "byte_size=" << byte_size << " length=" << length
1877 << " component_size=" << component_size << " data_offset=" << data_offset;
1878 }
1879}
1880
1881 // Reuse the memory blocks that were copies of objects that were lost in races.
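// The skipped-blocks map is keyed by block byte size; lower_bound() finds the smallest recorded
// block that can hold alloc_size. Illustrative example (hypothetical sizes): with blocks of
// 48 and 96 bytes and alloc_size = 64, the 96-byte block is chosen, and the 32-byte remainder is
// filled with a dummy object and re-inserted into the map for later reuse.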
1882mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
1883 // Try to reuse the blocks that were unused due to CAS failures.
Roland Levillain14d90572015-07-16 10:52:26 +01001884 CHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001885 Thread* self = Thread::Current();
1886 size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
1887 MutexLock mu(self, skipped_blocks_lock_);
1888 auto it = skipped_blocks_map_.lower_bound(alloc_size);
1889 if (it == skipped_blocks_map_.end()) {
1890 // Not found.
1891 return nullptr;
1892 }
1893 {
1894 size_t byte_size = it->first;
1895 CHECK_GE(byte_size, alloc_size);
1896 if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
1897 // If remainder would be too small for a dummy object, retry with a larger request size.
1898 it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
1899 if (it == skipped_blocks_map_.end()) {
1900 // Not found.
1901 return nullptr;
1902 }
Roland Levillain14d90572015-07-16 10:52:26 +01001903 CHECK_ALIGNED(it->first - alloc_size, space::RegionSpace::kAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001904 CHECK_GE(it->first - alloc_size, min_object_size)
1905 << "byte_size=" << byte_size << " it->first=" << it->first << " alloc_size=" << alloc_size;
1906 }
1907 }
1908 // Found a block.
1909 CHECK(it != skipped_blocks_map_.end());
1910 size_t byte_size = it->first;
1911 uint8_t* addr = it->second;
1912 CHECK_GE(byte_size, alloc_size);
1913 CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
Roland Levillain14d90572015-07-16 10:52:26 +01001914 CHECK_ALIGNED(byte_size, space::RegionSpace::kAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001915 if (kVerboseMode) {
1916 LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
1917 }
1918 skipped_blocks_map_.erase(it);
1919 memset(addr, 0, byte_size);
1920 if (byte_size > alloc_size) {
1921 // Return the remainder to the map.
Roland Levillain14d90572015-07-16 10:52:26 +01001922 CHECK_ALIGNED(byte_size - alloc_size, space::RegionSpace::kAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001923 CHECK_GE(byte_size - alloc_size, min_object_size);
1924 FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
1925 byte_size - alloc_size);
1926 CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
1927 skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
1928 }
1929 return reinterpret_cast<mirror::Object*>(addr);
1930}
1931
1932mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) {
1933 DCHECK(region_space_->IsInFromSpace(from_ref));
1934 // No read barrier to avoid nested RB that might violate the to-space
1935 // invariant. Note that from_ref is a from space ref so the SizeOf()
1936 // call will access the from-space meta objects, but it's ok and necessary.
1937 size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
1938 size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
1939 size_t region_space_bytes_allocated = 0U;
1940 size_t non_moving_space_bytes_allocated = 0U;
1941 size_t bytes_allocated = 0U;
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001942 size_t dummy;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001943 mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001944 region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001945 bytes_allocated = region_space_bytes_allocated;
1946 if (to_ref != nullptr) {
1947 DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
1948 }
1949 bool fall_back_to_non_moving = false;
1950 if (UNLIKELY(to_ref == nullptr)) {
1951 // Failed to allocate in the region space. Try the skipped blocks.
1952 to_ref = AllocateInSkippedBlock(region_space_alloc_size);
1953 if (to_ref != nullptr) {
1954 // Succeeded to allocate in a skipped block.
1955 if (heap_->use_tlab_) {
1956 // This is necessary for the tlab case as it's not accounted in the space.
1957 region_space_->RecordAlloc(to_ref);
1958 }
1959 bytes_allocated = region_space_alloc_size;
1960 } else {
1961 // Fall back to the non-moving space.
1962 fall_back_to_non_moving = true;
1963 if (kVerboseMode) {
1964 LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
1965 << to_space_bytes_skipped_.LoadSequentiallyConsistent()
1966 << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent();
1967 }
1968 fall_back_to_non_moving = true;
1969 to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001970 &non_moving_space_bytes_allocated, nullptr, &dummy);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001971 CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed";
1972 bytes_allocated = non_moving_space_bytes_allocated;
1973 // Mark it in the mark bitmap.
1974 accounting::ContinuousSpaceBitmap* mark_bitmap =
1975 heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
1976 CHECK(mark_bitmap != nullptr);
1977 CHECK(!mark_bitmap->AtomicTestAndSet(to_ref));
1978 }
1979 }
1980 DCHECK(to_ref != nullptr);
1981
1982 // Attempt to install the forward pointer. This is in a loop as the
1983 // lock word atomic write can fail.
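  // Both the GC and mutators (via the read barrier slow path) may copy the same object
  // concurrently; whoever installs the forwarding address in the lock word first wins, and the
  // loser recycles its copy through FillWithDummyObject / the skipped-blocks map below.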
1984 while (true) {
1985 // Copy the object. TODO: copy only the lockword in the second iteration and on?
1986 memcpy(to_ref, from_ref, obj_size);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001987
1988 LockWord old_lock_word = to_ref->GetLockWord(false);
1989
1990 if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
1991 // Lost the race. Another thread (either GC or mutator) stored
1992 // the forwarding pointer first. Make the lost copy (to_ref)
1993 // look like a valid but dead (dummy) object and keep it for
1994 // future reuse.
1995 FillWithDummyObject(to_ref, bytes_allocated);
1996 if (!fall_back_to_non_moving) {
1997 DCHECK(region_space_->IsInToSpace(to_ref));
1998 if (bytes_allocated > space::RegionSpace::kRegionSize) {
1999 // Free the large alloc.
2000 region_space_->FreeLarge(to_ref, bytes_allocated);
2001 } else {
2002 // Record the lost copy for later reuse.
2003 heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated);
2004 to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated);
2005 to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1);
2006 MutexLock mu(Thread::Current(), skipped_blocks_lock_);
2007 skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
2008 reinterpret_cast<uint8_t*>(to_ref)));
2009 }
2010 } else {
2011 DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
2012 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
2013 // Free the non-moving-space chunk.
2014 accounting::ContinuousSpaceBitmap* mark_bitmap =
2015 heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
2016 CHECK(mark_bitmap != nullptr);
2017 CHECK(mark_bitmap->Clear(to_ref));
2018 heap_->non_moving_space_->Free(Thread::Current(), to_ref);
2019 }
2020
2021 // Get the winner's forward ptr.
2022 mirror::Object* lost_fwd_ptr = to_ref;
2023 to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress());
2024 CHECK(to_ref != nullptr);
2025 CHECK_NE(to_ref, lost_fwd_ptr);
2026 CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref));
2027 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
2028 return to_ref;
2029 }
2030
Hiroshi Yamauchi60f63f52015-04-23 16:12:40 -07002031 // Set the gray ptr.
2032 if (kUseBakerReadBarrier) {
2033 to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr());
2034 }
2035
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002036 LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));
2037
2038 // Try to atomically write the fwd ptr.
2039 bool success = from_ref->CasLockWordWeakSequentiallyConsistent(old_lock_word, new_lock_word);
2040 if (LIKELY(success)) {
2041 // The CAS succeeded.
2042 objects_moved_.FetchAndAddSequentiallyConsistent(1);
2043 bytes_moved_.FetchAndAddSequentiallyConsistent(region_space_alloc_size);
2044 if (LIKELY(!fall_back_to_non_moving)) {
2045 DCHECK(region_space_->IsInToSpace(to_ref));
2046 } else {
2047 DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
2048 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
2049 }
2050 if (kUseBakerReadBarrier) {
2051 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
2052 }
2053 DCHECK(GetFwdPtr(from_ref) == to_ref);
2054 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07002055 PushOntoMarkStack(to_ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002056 return to_ref;
2057 } else {
2058 // The CAS failed. It may have lost the race or may have failed
2059 // due to monitor/hashcode ops. Either way, retry.
2060 }
2061 }
2062}
2063
2064mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
2065 DCHECK(from_ref != nullptr);
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08002066 space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
2067 if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002068 // It's already marked.
2069 return from_ref;
2070 }
2071 mirror::Object* to_ref;
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08002072 if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002073 to_ref = GetFwdPtr(from_ref);
2074 DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) ||
2075 heap_->non_moving_space_->HasAddress(to_ref))
2076 << "from_ref=" << from_ref << " to_ref=" << to_ref;
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08002077 } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002078 if (region_space_bitmap_->Test(from_ref)) {
2079 to_ref = from_ref;
2080 } else {
2081 to_ref = nullptr;
2082 }
2083 } else {
2084 // from_ref is in a non-moving space.
Mathieu Chartier763a31e2015-11-16 16:05:55 -08002085 if (immune_spaces_.ContainsObject(from_ref)) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002086 // An immune object is alive.
2087 to_ref = from_ref;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002088 } else {
2089 // Non-immune non-moving space. Use the mark bitmap.
2090 accounting::ContinuousSpaceBitmap* mark_bitmap =
2091 heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
2092 accounting::LargeObjectBitmap* los_bitmap =
2093 heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
2094 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
2095 bool is_los = mark_bitmap == nullptr;
2096 if (!is_los && mark_bitmap->Test(from_ref)) {
2097 // Already marked.
2098 to_ref = from_ref;
2099 } else if (is_los && los_bitmap->Test(from_ref)) {
2100 // Already marked in LOS.
2101 to_ref = from_ref;
2102 } else {
2103 // Not marked.
2104 if (IsOnAllocStack(from_ref)) {
2105 // If on the allocation stack, it's considered marked.
2106 to_ref = from_ref;
2107 } else {
2108 // Not marked.
2109 to_ref = nullptr;
2110 }
2111 }
2112 }
2113 }
2114 return to_ref;
2115}
2116
2117bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
2118 QuasiAtomic::ThreadFenceAcquire();
2119 accounting::ObjectStack* alloc_stack = GetAllocationStack();
Mathieu Chartiercb535da2015-01-23 13:50:03 -08002120 return alloc_stack->Contains(ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002121}
2122
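// Marks a ref that lives outside the region space (non-moving space or large object space).
// Such objects are never copied; marking is recorded in the corresponding mark bitmap and, for
// Baker read barriers, in the object's read barrier state.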
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07002123mirror::Object* ConcurrentCopying::MarkNonMoving(mirror::Object* ref) {
2124 // ref is in a non-moving space (from_ref == to_ref).
2125 DCHECK(!region_space_->HasAddress(ref)) << ref;
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002126 DCHECK(!immune_spaces_.ContainsObject(ref));
2127 // Use the mark bitmap.
2128 accounting::ContinuousSpaceBitmap* mark_bitmap =
2129 heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
2130 accounting::LargeObjectBitmap* los_bitmap =
2131 heap_mark_bitmap_->GetLargeObjectBitmap(ref);
2132 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
2133 bool is_los = mark_bitmap == nullptr;
2134 if (!is_los && mark_bitmap->Test(ref)) {
2135 // Already marked.
2136 if (kUseBakerReadBarrier) {
2137 DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
2138 ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002139 }
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002140 } else if (is_los && los_bitmap->Test(ref)) {
2141 // Already marked in LOS.
2142 if (kUseBakerReadBarrier) {
2143 DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
2144 ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
2145 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002146 } else {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002147 // Not marked.
2148 if (IsOnAllocStack(ref)) {
2149 // If it's on the allocation stack, it's considered marked. Keep it white.
2150 // Objects on the allocation stack need not be marked.
2151 if (!is_los) {
2152 DCHECK(!mark_bitmap->Test(ref));
2153 } else {
2154 DCHECK(!los_bitmap->Test(ref));
Nicolas Geoffrayddeb1722016-06-28 08:25:59 +00002155 }
Nicolas Geoffrayddeb1722016-06-28 08:25:59 +00002156 if (kUseBakerReadBarrier) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002157 DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr());
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002158 }
2159 } else {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002160 // For the baker-style RB, we need to handle 'false-gray' cases. See the
2161 // kRegionTypeUnevacFromSpace-case comment in Mark().
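      // A 'false gray' arises when the CAS below turns the object's read barrier state gray but
      // another thread has already won the race to set the mark bit; such objects are recorded
      // with PushOntoFalseGrayStack() so their state can be corrected later.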
2162 if (kUseBakerReadBarrier) {
2163 // Test the bitmap first to reduce the chance of false gray cases.
2164 if ((!is_los && mark_bitmap->Test(ref)) ||
2165 (is_los && los_bitmap->Test(ref))) {
2166 return ref;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002167 }
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002168 }
2169 // Not marked or on the allocation stack. Try to mark it.
2170 // This may or may not succeed, which is ok.
2171 bool cas_success = false;
2172 if (kUseBakerReadBarrier) {
2173 cas_success = ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(),
2174 ReadBarrier::GrayPtr());
2175 }
2176 if (!is_los && mark_bitmap->AtomicTestAndSet(ref)) {
2177 // Already marked.
2178 if (kUseBakerReadBarrier && cas_success &&
2179 ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
2180 PushOntoFalseGrayStack(ref);
2181 }
2182 } else if (is_los && los_bitmap->AtomicTestAndSet(ref)) {
2183 // Already marked in LOS.
2184 if (kUseBakerReadBarrier && cas_success &&
2185 ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
2186 PushOntoFalseGrayStack(ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002187 }
2188 } else {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002189 // Newly marked.
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08002190 if (kUseBakerReadBarrier) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002191 DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08002192 }
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002193 PushOntoMarkStack(ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002194 }
2195 }
2196 }
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07002197 return ref;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002198}
2199
void ConcurrentCopying::FinishPhase() {
  Thread* const self = Thread::Current();
  {
    MutexLock mu(self, mark_stack_lock_);
    CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
  }
  region_space_ = nullptr;
  {
    MutexLock mu(Thread::Current(), skipped_blocks_lock_);
    skipped_blocks_map_.clear();
  }
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    {
      WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
      heap_->ClearMarkedObjects();
    }
    if (kUseBakerReadBarrier && kFilterModUnionCards) {
      TimingLogger::ScopedTiming split("FilterModUnionCards", GetTimings());
      ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
      gc::Heap* const heap = Runtime::Current()->GetHeap();
      for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
        DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
        accounting::ModUnionTable* table = heap->FindModUnionTableFromSpace(space);
        // Filter out cards that don't need to be set.
        if (table != nullptr) {
          table->FilterCards();
        }
      }
    }
  }
  if (measure_read_barrier_slow_path_) {
    MutexLock mu(self, rb_slow_path_histogram_lock_);
    rb_slow_path_time_histogram_.AdjustAndAddValue(rb_slow_path_ns_.LoadRelaxed());
    rb_slow_path_count_total_ += rb_slow_path_count_.LoadRelaxed();
    rb_slow_path_count_gc_total_ += rb_slow_path_count_gc_.LoadRelaxed();
  }
}

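// Returns whether the object referenced by the given field is marked. If the referent has been
// moved, the field is updated in place to point to the to-space copy, with fences ordering the
// store of the forwarded reference.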
bool ConcurrentCopying::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) {
  mirror::Object* from_ref = field->AsMirrorPtr();
  mirror::Object* to_ref = IsMarked(from_ref);
  if (to_ref == nullptr) {
    return false;
  }
  if (from_ref != to_ref) {
    QuasiAtomic::ThreadFenceRelease();
    field->Assign(to_ref);
    QuasiAtomic::ThreadFenceSequentiallyConsistent();
  }
  return true;
}

mirror::Object* ConcurrentCopying::MarkObject(mirror::Object* from_ref) {
  return Mark(from_ref);
}

void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}

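// Runs reference processing through the heap's ReferenceProcessor; delayed references were
// queued via DelayReferenceReferent() above.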
void ConcurrentCopying::ProcessReferences(Thread* self) {
  TimingLogger::ScopedTiming split("ProcessReferences", GetTimings());
  // We don't really need to hold the heap bitmap lock as we use CAS to mark in bitmaps.
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}

void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  region_space_->RevokeAllThreadLocalBuffers();
}

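// Instrumented read-barrier slow path: counts entries (separately for the GC thread and
// mutators) and, when slow-path timing is enabled, accumulates the time spent in Mark().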
mirror::Object* ConcurrentCopying::MarkFromReadBarrierWithMeasurements(mirror::Object* from_ref) {
  if (Thread::Current() != thread_running_gc_) {
    rb_slow_path_count_.FetchAndAddRelaxed(1u);
  } else {
    rb_slow_path_count_gc_.FetchAndAddRelaxed(1u);
  }
  ScopedTrace tr(__FUNCTION__);
  const uint64_t start_time = measure_read_barrier_slow_path_ ? NanoTime() : 0u;
  mirror::Object* ret = Mark(from_ref);
  if (measure_read_barrier_slow_path_) {
    rb_slow_path_ns_.FetchAndAddRelaxed(NanoTime() - start_time);
  }
  return ret;
}

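// Appends the read-barrier slow-path time histogram and counts to the collector's standard
// performance dump.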
void ConcurrentCopying::DumpPerformanceInfo(std::ostream& os) {
  GarbageCollector::DumpPerformanceInfo(os);
  MutexLock mu(Thread::Current(), rb_slow_path_histogram_lock_);
  if (rb_slow_path_time_histogram_.SampleSize() > 0) {
    Histogram<uint64_t>::CumulativeData cumulative_data;
    rb_slow_path_time_histogram_.CreateHistogram(&cumulative_data);
    rb_slow_path_time_histogram_.PrintConfidenceIntervals(os, 0.99, cumulative_data);
  }
  if (rb_slow_path_count_total_ > 0) {
    os << "Slow path count " << rb_slow_path_count_total_ << "\n";
  }
  if (rb_slow_path_count_gc_total_ > 0) {
    os << "GC slow path count " << rb_slow_path_count_gc_total_ << "\n";
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art