/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_
#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_

#include "concurrent_copying.h"

#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/region_space.h"
#include "lock_word.h"

namespace art {
namespace gc {
namespace collector {

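// Marks an object in an unevacuated from-space region. The object is not copied; marking it
// consists of setting its bit in the region space bitmap and, with the Baker read barrier,
// graying it so that its fields get marked through.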
inline mirror::Object* ConcurrentCopying::MarkUnevacFromSpaceRegion(
    mirror::Object* ref, accounting::ContinuousSpaceBitmap* bitmap) {
  // For the Baker-style read barrier, in a rare case, we could incorrectly change the object
  // from white to gray even though the object has already been marked through. This happens if
  // a mutator thread gets preempted before the AtomicSetReadBarrierPointer below, the GC marks
  // through the object (changing it from white to gray and back to white), and the thread
  // resumes and incorrectly changes it from white to gray. We need to detect such "false gray"
  // cases and change the objects back to white at the end of marking.
  if (kUseBakerReadBarrier) {
    // Test the bitmap first to reduce the chance of false gray cases.
    if (bitmap->Test(ref)) {
      return ref;
    }
  }
  // This may or may not succeed, which is OK because the object may already be gray.
  bool cas_success = false;
  if (kUseBakerReadBarrier) {
    cas_success = ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(),
                                                   ReadBarrier::GrayPtr());
  }
  if (bitmap->AtomicTestAndSet(ref)) {
    // Already marked.
    if (kUseBakerReadBarrier &&
        cas_success &&
        // The object could be white here if a thread gets preempted after succeeding at the
        // AtomicSetReadBarrierPointer above, the GC marks through the object, and the thread
        // runs up to this point.
        ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
      // Register a "false gray" object to change it from gray to white at the end of marking.
      PushOntoFalseGrayStack(ref);
    }
  } else {
    // Newly marked.
    if (kUseBakerReadBarrier) {
      DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
    }
    PushOntoMarkStack(ref);
  }
  return ref;
}

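// Marks an object that lives in an immune space, i.e. a space this GC never evacuates or
// reclaims. With the Baker read barrier, immune objects only need to be grayed (and recorded on
// immune_gray_stack_) until the GC has updated all immune space objects; after that point the
// reference is returned unchanged.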
template<bool kGrayImmuneObject>
inline mirror::Object* ConcurrentCopying::MarkImmuneSpace(mirror::Object* ref) {
  if (kUseBakerReadBarrier) {
    // The GC-running thread doesn't (need to) gray immune objects except when updating thread
    // roots in the thread flip on behalf of suspended threads (when gc_grays_immune_objects_
    // is true). Also, a mutator doesn't (need to) gray an immune object after the GC has
    // updated all immune space objects (when updated_all_immune_objects_ is true).
    if (kIsDebugBuild) {
      if (Thread::Current() == thread_running_gc_) {
        DCHECK(!kGrayImmuneObject ||
               updated_all_immune_objects_.LoadRelaxed() ||
               gc_grays_immune_objects_);
      } else {
        DCHECK(kGrayImmuneObject);
      }
    }
    if (!kGrayImmuneObject || updated_all_immune_objects_.LoadRelaxed()) {
      return ref;
    }
    // This may or may not succeed, which is OK because the object may already be gray.
    bool success = ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(),
                                                    ReadBarrier::GrayPtr());
    if (success) {
      MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
      immune_gray_stack_.push_back(ref);
    }
  }
  return ref;
}

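// The core marking routine: returns a marked, to-space reference corresponding to from_ref,
// marking the object first if it hasn't been marked yet. How the object is marked depends on
// the type of region (if any) that holds it.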
template<bool kGrayImmuneObject>
inline mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
  if (from_ref == nullptr) {
    return nullptr;
  }
  DCHECK(heap_->collector_type_ == kCollectorTypeCC);
  if (UNLIKELY(kUseBakerReadBarrier && !is_active_)) {
    // In the lock word forwarding address state, the read barrier bits in the lock word are
    // part of the stored forwarding address and are invalid. This is usually OK, as the
    // from-space copies of objects aren't accessed by mutators due to the to-space invariant.
    // However, during the dex2oat image writing relocation and the zygote compaction, objects
    // can be in the forwarding address state (to store the forward/relocation addresses) and
    // can still be accessed, so the invalid read barrier bits are consulted. If the bits look
    // gray when the object isn't really gray, the read barrier slow path can trigger when it
    // shouldn't. To guard against this, return here if the CC collector isn't running.
    return from_ref;
  }
  DCHECK(region_space_ != nullptr) << "Read barrier slow path taken when CC isn't running?";
  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
  switch (rtype) {
    case space::RegionSpace::RegionType::kRegionTypeToSpace:
      // It's already marked.
      return from_ref;
    case space::RegionSpace::RegionType::kRegionTypeFromSpace: {
      mirror::Object* to_ref = GetFwdPtr(from_ref);
      if (kUseBakerReadBarrier) {
        DCHECK_NE(to_ref, ReadBarrier::GrayPtr())
            << "from_ref=" << from_ref << " to_ref=" << to_ref;
      }
      if (to_ref == nullptr) {
        // It isn't marked yet. Mark it by copying it to the to-space.
        to_ref = Copy(from_ref);
      }
      DCHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
          << "from_ref=" << from_ref << " to_ref=" << to_ref;
      return to_ref;
    }
    case space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace: {
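      // This region is not being evacuated; mark the object in place via the region space
      // bitmap instead of copying it.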
      return MarkUnevacFromSpaceRegion(from_ref, region_space_bitmap_);
    }
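    // Not in the region space: the object lives in an immune space or the non-moving space.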
    case space::RegionSpace::RegionType::kRegionTypeNone:
      if (immune_spaces_.ContainsObject(from_ref)) {
        return MarkImmuneSpace<kGrayImmuneObject>(from_ref);
      } else {
        return MarkNonMoving(from_ref);
      }
    default:
      UNREACHABLE();
  }
}

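// Entry point for the read barrier slow path: marks from_ref, optionally recording timing
// measurements while slow paths are under investigation (b/30162165).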
inline mirror::Object* ConcurrentCopying::MarkFromReadBarrier(mirror::Object* from_ref) {
  // TODO: Consider removing this check when we are done investigating slow paths. b/30162165
  if (UNLIKELY(mark_from_read_barrier_measurements_)) {
    return MarkFromReadBarrierWithMeasurements(from_ref);
  }
  return Mark(from_ref);
}

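// Returns the to-space address stored in from_ref's lock word as a forwarding address, or null
// if from_ref hasn't been copied to the to-space yet.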
inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  LockWord lw = from_ref->GetLockWord(false);
  if (lw.GetState() == LockWord::kForwardingAddress) {
    mirror::Object* fwd_ptr = reinterpret_cast<mirror::Object*>(lw.ForwardingAddress());
    DCHECK(fwd_ptr != nullptr);
    return fwd_ptr;
  } else {
    return nullptr;
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_