/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_
#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_

#include "concurrent_copying.h"

#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/region_space.h"
#include "lock_word.h"

namespace art {
namespace gc {
namespace collector {

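// Mark a reference that lives in an unevacuated from-space region: gray it (Baker) or set its
// bitmap bit, and push it onto the mark stack if this thread is the one that marked it.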
inline mirror::Object* ConcurrentCopying::MarkUnevacFromSpaceRegion(
    mirror::Object* ref, accounting::ContinuousSpaceBitmap* bitmap) {
  // For the Baker-style RB, in a rare case, we could incorrectly change the object from white
  // to gray even though the object has already been marked through. This happens if a mutator
  // thread gets preempted before the AtomicSetReadBarrierState below, the GC marks through the
  // object (changes it from white to gray and back to white), and the thread resumes and
  // incorrectly changes it from white to gray. If this happens, the object will get added to the
  // mark stack again and get changed back to white after it is processed.
  if (kUseBakerReadBarrier) {
    // Test the bitmap first to avoid graying an object that has already been marked through most
    // of the time.
    if (bitmap->Test(ref)) {
      return ref;
    }
  }
  // This may or may not succeed, which is OK because the object may already be gray.
  bool success = false;
  if (kUseBakerReadBarrier) {
    // The GC will mark the bitmap when popping from the mark stack. If only the GC is touching
    // the bitmap we can avoid an expensive CAS.
    // For the Baker case, an object is marked if either the mark bit is set or the bitmap bit is
    // set.
    success = ref->AtomicSetReadBarrierState(ReadBarrier::WhiteState(), ReadBarrier::GrayState());
  } else {
    success = !bitmap->AtomicTestAndSet(ref);
  }
  if (success) {
    // Newly marked.
    if (kUseBakerReadBarrier) {
      DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::GrayState());
    }
    PushOntoMarkStack(ref);
  }
  return ref;
}

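// Mark a reference that lives in an immune space. With the Baker read barrier, gray the object
// and record it on immune_gray_stack_ unless all immune-space objects have already been updated.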
template<bool kGrayImmuneObject>
inline mirror::Object* ConcurrentCopying::MarkImmuneSpace(mirror::Object* ref) {
  if (kUseBakerReadBarrier) {
    // The GC-running thread doesn't (need to) gray immune objects except when updating thread
    // roots in the thread flip on behalf of suspended threads (when gc_grays_immune_objects_ is
    // true). Also, a mutator doesn't (need to) gray an immune object after GC has updated all
    // immune space objects (when updated_all_immune_objects_ is true).
    if (kIsDebugBuild) {
      if (Thread::Current() == thread_running_gc_) {
        DCHECK(!kGrayImmuneObject ||
               updated_all_immune_objects_.LoadRelaxed() ||
               gc_grays_immune_objects_);
      } else {
        DCHECK(kGrayImmuneObject);
      }
    }
    if (!kGrayImmuneObject || updated_all_immune_objects_.LoadRelaxed()) {
      return ref;
    }
    // This may or may not succeed, which is OK because the object may already be gray.
    bool success = ref->AtomicSetReadBarrierState(ReadBarrier::WhiteState(),
                                                  ReadBarrier::GrayState());
    if (success) {
      MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
      immune_gray_stack_.push_back(ref);
    }
  }
  return ref;
}

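// Mark from_ref and return its to-space reference. kGrayImmuneObject controls whether
// immune-space objects may be grayed; kFromGCThread asserts that the caller is the GC thread.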
template<bool kGrayImmuneObject, bool kFromGCThread>
inline mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
  if (from_ref == nullptr) {
    return nullptr;
  }
  DCHECK(heap_->collector_type_ == kCollectorTypeCC);
  if (kFromGCThread) {
    DCHECK(is_active_);
    DCHECK_EQ(Thread::Current(), thread_running_gc_);
  } else if (UNLIKELY(kUseBakerReadBarrier && !is_active_)) {
    // In the lock word forwarding address state, the read barrier bits
    // in the lock word are part of the stored forwarding address and
    // invalid. This is usually OK as the from-space copies of objects
    // aren't accessed by mutators due to the to-space
    // invariant. However, during the dex2oat image writing relocation
    // and the zygote compaction, objects can be in the forwarding
    // address state (to store the forwarding/relocation addresses) and
    // they can still be accessed and the invalid read barrier bits
    // are consulted. If they look gray but aren't really, the
    // read barrier slow path can trigger when it shouldn't. To guard
    // against this, return here if the CC collector isn't running.
    return from_ref;
  }
  DCHECK(region_space_ != nullptr) << "Read barrier slow path taken when CC isn't running?";
  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
  switch (rtype) {
    case space::RegionSpace::RegionType::kRegionTypeToSpace:
      // It's already marked.
      return from_ref;
    case space::RegionSpace::RegionType::kRegionTypeFromSpace: {
      mirror::Object* to_ref = GetFwdPtr(from_ref);
      if (to_ref == nullptr) {
        // It isn't marked yet. Mark it by copying it to the to-space.
        to_ref = Copy(from_ref);
      }
      DCHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
          << "from_ref=" << from_ref << " to_ref=" << to_ref;
      return to_ref;
    }
    case space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace: {
      return MarkUnevacFromSpaceRegion(from_ref, region_space_bitmap_);
    }
    case space::RegionSpace::RegionType::kRegionTypeNone:
      if (immune_spaces_.ContainsObject(from_ref)) {
        return MarkImmuneSpace<kGrayImmuneObject>(from_ref);
      } else {
        return MarkNonMoving(from_ref);
      }
    default:
      UNREACHABLE();
  }
}

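// Slow-path entry point called from the read barrier. Marks from_ref and, with the Baker read
// barrier, also sets the object's mark bit so later barriers can take the fast path.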
inline mirror::Object* ConcurrentCopying::MarkFromReadBarrier(mirror::Object* from_ref) {
  mirror::Object* ret;
  if (from_ref == nullptr) {
    return from_ref;
  }
  // TODO: Consider removing this check when we are done investigating slow paths. b/30162165
  if (UNLIKELY(mark_from_read_barrier_measurements_)) {
    ret = MarkFromReadBarrierWithMeasurements(from_ref);
  } else {
    ret = Mark(from_ref);
  }
  // Only set the mark bit for the Baker barrier.
  if (kUseBakerReadBarrier && LIKELY(!rb_mark_bit_stack_full_ && ret->AtomicSetMarkBit(0, 1))) {
    // If the mark bit stack is full, we may temporarily go to marked and back to unmarked. Seeing
    // either value is OK since the only race is doing an unnecessary Mark.
    if (!rb_mark_bit_stack_->AtomicPushBack(ret)) {
      // The mark bit stack is full; set the bit back to zero.
      CHECK(ret->AtomicSetMarkBit(1, 0));
      // Set rb_mark_bit_stack_full_. This is racy but OK since AtomicPushBack is thread safe.
      rb_mark_bit_stack_full_ = true;
    }
  }
  return ret;
}

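// Return the forwarding address stored in the lock word of a from-space object, or null if the
// object has not been copied to the to-space yet.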
inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  LockWord lw = from_ref->GetLockWord(false);
  if (lw.GetState() == LockWord::kForwardingAddress) {
    mirror::Object* fwd_ptr = reinterpret_cast<mirror::Object*>(lw.ForwardingAddress());
    DCHECK(fwd_ptr != nullptr);
    return fwd_ptr;
  } else {
    return nullptr;
  }
}

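// Return true if a reference in an unevacuated from-space region has already been marked, i.e.
// its read barrier state is gray (Baker) or its bitmap bit is set.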
inline bool ConcurrentCopying::IsMarkedInUnevacFromSpace(mirror::Object* from_ref) {
  // Use load-acquire on the read barrier state to ensure that we never see a white read barrier
  // state with an unmarked bitmap bit due to reordering.
  DCHECK(region_space_->IsInUnevacFromSpace(from_ref));
  if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) {
    return true;
  }
  return region_space_bitmap_->Test(from_ref);
}

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_