/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_
#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_

#include "concurrent_copying.h"

#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/region_space.h"
#include "lock_word.h"

namespace art {
namespace gc {
namespace collector {

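// Marks a reference that lives in an unevacuated from-space region. The object is marked in
// place (not copied): under the Baker read barrier it is grayed via a CAS on the read barrier
// pointer, otherwise its bit is set in the region space bitmap. Newly marked objects are pushed
// onto the mark stack for later processing.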
inline mirror::Object* ConcurrentCopying::MarkUnevacFromSpaceRegion(
    mirror::Object* ref, accounting::ContinuousSpaceBitmap* bitmap) {
  // For the Baker-style RB, in a rare case, we could incorrectly change the object from white
  // to gray even though the object has already been marked through. This happens if a mutator
  // thread gets preempted before the AtomicSetReadBarrierPointer below, GC marks through the
  // object (changes it from white to gray and back to white), and the thread runs and
  // incorrectly changes it from white to gray. If this happens, the object will get added to the
  // mark stack again and get changed back to white after it is processed.
  if (kUseBakerReadBarrier) {
    // Most of the time, testing the bitmap first avoids graying an object that has already been
    // marked through.
    if (bitmap->Test(ref)) {
      return ref;
    }
  }
  // This may or may not succeed, which is ok because the object may already be gray.
  bool success = false;
  if (kUseBakerReadBarrier) {
    // The GC will mark the bitmap when popping the object from the mark stack. If only the GC is
    // touching the bitmap, we can avoid an expensive CAS on it here.
    // For the Baker case, an object is marked if either the mark bit is set or the bitmap bit is
    // set.
    success = ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
  } else {
    success = !bitmap->AtomicTestAndSet(ref);
  }
  if (success) {
    // Newly marked.
    if (kUseBakerReadBarrier) {
      DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
    }
    PushOntoMarkStack(ref);
  }
  return ref;
}

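// Marks a reference that lives in an immune space. Immune objects are not moved; under the Baker
// read barrier they are grayed (when necessary) and recorded on immune_gray_stack_ for later
// processing by the GC.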
template<bool kGrayImmuneObject>
inline mirror::Object* ConcurrentCopying::MarkImmuneSpace(mirror::Object* ref) {
  if (kUseBakerReadBarrier) {
    // The GC-running thread doesn't (need to) gray immune objects except when updating thread
    // roots in the thread flip on behalf of suspended threads (when gc_grays_immune_objects_ is
    // true). Also, a mutator doesn't (need to) gray an immune object after GC has updated all
    // immune space objects (when updated_all_immune_objects_ is true).
    if (kIsDebugBuild) {
      if (Thread::Current() == thread_running_gc_) {
        DCHECK(!kGrayImmuneObject ||
               updated_all_immune_objects_.LoadRelaxed() ||
               gc_grays_immune_objects_);
      } else {
        DCHECK(kGrayImmuneObject);
      }
    }
    if (!kGrayImmuneObject || updated_all_immune_objects_.LoadRelaxed()) {
      return ref;
    }
    // This may or may not succeed, which is ok because the object may already be gray.
    bool success = ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(),
                                                    ReadBarrier::GrayPtr());
    if (success) {
      MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
      immune_gray_stack_.push_back(ref);
    }
  }
  return ref;
}

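// Top-level marking entry point: returns the to-space (marked) reference corresponding to
// from_ref, copying or marking it as needed based on the type of region/space it lives in.
// kGrayImmuneObject controls whether immune objects may be grayed; kFromGCThread indicates the
// call is made by the GC-running thread itself.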
template<bool kGrayImmuneObject, bool kFromGCThread>
inline mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
  if (from_ref == nullptr) {
    return nullptr;
  }
  DCHECK(heap_->collector_type_ == kCollectorTypeCC);
  if (kFromGCThread) {
    DCHECK(is_active_);
    DCHECK_EQ(Thread::Current(), thread_running_gc_);
  } else if (UNLIKELY(kUseBakerReadBarrier && !is_active_)) {
    // In the lock word forwarding address state, the read barrier bits
    // in the lock word are part of the stored forwarding address and
    // therefore invalid. This is usually OK because from-space copies
    // of objects aren't accessed by mutators due to the to-space
    // invariant. However, during dex2oat image writing relocation and
    // zygote compaction, objects can be in the forwarding address
    // state (to store the forward/relocation addresses) while still
    // being accessed, so the invalid read barrier bits get consulted.
    // If those bits look gray even though the object isn't really gray,
    // the read barrier slow path can trigger when it shouldn't. To guard
    // against this, return here if the CC collector isn't running.
    return from_ref;
  }
  DCHECK(region_space_ != nullptr) << "Read barrier slow path taken when CC isn't running?";
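  // Dispatch on the type of region (or space) holding from_ref: to-space objects are already
  // marked, from-space objects are forwarded/copied, unevacuated from-space objects are marked
  // in place, and non-region objects are either immune or non-moving.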
  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
  switch (rtype) {
    case space::RegionSpace::RegionType::kRegionTypeToSpace:
      // It's already marked.
      return from_ref;
    case space::RegionSpace::RegionType::kRegionTypeFromSpace: {
      mirror::Object* to_ref = GetFwdPtr(from_ref);
      if (kUseBakerReadBarrier) {
        DCHECK_NE(to_ref, ReadBarrier::GrayPtr())
            << "from_ref=" << from_ref << " to_ref=" << to_ref;
      }
      if (to_ref == nullptr) {
        // It isn't marked yet. Mark it by copying it to the to-space.
        to_ref = Copy(from_ref);
      }
      DCHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
          << "from_ref=" << from_ref << " to_ref=" << to_ref;
      return to_ref;
    }
    case space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace: {
      return MarkUnevacFromSpaceRegion(from_ref, region_space_bitmap_);
    }
    case space::RegionSpace::RegionType::kRegionTypeNone:
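      // The reference is not in the region space: it is either in an immune space or in the
      // non-moving space.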
      if (immune_spaces_.ContainsObject(from_ref)) {
        return MarkImmuneSpace<kGrayImmuneObject>(from_ref);
      } else {
        return MarkNonMoving(from_ref);
      }
    default:
      UNREACHABLE();
  }
}

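// Entry point used by the read barrier slow path. Marks from_ref and, under the Baker barrier,
// sets the object's mark bit and records it on rb_mark_bit_stack_ so that subsequent barrier
// hits on the same object can return early via the GetMarkBit() check at the top of this
// function.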
inline mirror::Object* ConcurrentCopying::MarkFromReadBarrier(mirror::Object* from_ref) {
  mirror::Object* ret;
  // TODO: Delete GetMarkBit check when all of the callers properly check the bit. Remaining caller
  // is array allocations.
  if (from_ref == nullptr || from_ref->GetMarkBit()) {
    return from_ref;
  }
  // TODO: Consider removing this check when we are done investigating slow paths. b/30162165
  if (UNLIKELY(mark_from_read_barrier_measurements_)) {
    ret = MarkFromReadBarrierWithMeasurements(from_ref);
  } else {
    ret = Mark(from_ref);
  }
  // Only set the mark bit for baker barrier.
  if (kUseBakerReadBarrier && LIKELY(!rb_mark_bit_stack_full_ && ret->AtomicSetMarkBit(0, 1))) {
    // If the mark stack is full, we may temporarily go to marked and back to unmarked. Seeing
    // either value is OK since the only race is doing an unnecessary Mark.
    if (!rb_mark_bit_stack_->AtomicPushBack(ret)) {
      // Mark stack is full, set the bit back to zero.
      CHECK(ret->AtomicSetMarkBit(1, 0));
      // Set rb_mark_bit_stack_full_; this is racy but OK since AtomicPushBack is thread safe.
      rb_mark_bit_stack_full_ = true;
    }
  }
  return ret;
}

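// Returns the to-space address stored in the lock word of a from-space object if it has already
// been copied, or nullptr if the object has not been forwarded yet.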
inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  LockWord lw = from_ref->GetLockWord(false);
  if (lw.GetState() == LockWord::kForwardingAddress) {
    mirror::Object* fwd_ptr = reinterpret_cast<mirror::Object*>(lw.ForwardingAddress());
    DCHECK(fwd_ptr != nullptr);
    return fwd_ptr;
  } else {
    return nullptr;
  }
}

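// Returns true if from_ref, which must be in an unevacuated from-space region, has already been
// marked: either gray under the Baker read barrier or set in the region space bitmap.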
inline bool ConcurrentCopying::IsMarkedInUnevacFromSpace(mirror::Object* from_ref) {
  // Use load acquire on the read barrier pointer to ensure that we never see a white read barrier
  // pointer with an unmarked bit due to reordering.
  DCHECK(region_space_->IsInUnevacFromSpace(from_ref));
  if (kUseBakerReadBarrier && from_ref->GetReadBarrierPointerAcquire() == ReadBarrier::GrayPtr()) {
    return true;
  }
  return region_space_bitmap_->Test(from_ref);
}

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_