/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "reference_processor.h"

#include "base/time_utils.h"
#include "collector/garbage_collector.h"
#include "java_vm_ext.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/reference-inl.h"
#include "reference_processor-inl.h"
#include "reflection.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change-inl.h"
#include "task_processor.h"
#include "utils.h"
#include "well_known_classes.h"

namespace art {
namespace gc {

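// If true, cleared references are handed to the heap's TaskProcessor instead of being added
// to the Java ReferenceQueue synchronously (see EnqueueClearedReferences() below).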
static constexpr bool kAsyncReferenceQueueAdd = false;

ReferenceProcessor::ReferenceProcessor()
    : collector_(nullptr),
      preserving_references_(false),
      condition_("reference processor condition", *Locks::reference_processor_lock_),
      soft_reference_queue_(Locks::reference_queue_soft_references_lock_),
      weak_reference_queue_(Locks::reference_queue_weak_references_lock_),
      finalizer_reference_queue_(Locks::reference_queue_finalizer_references_lock_),
      phantom_reference_queue_(Locks::reference_queue_phantom_references_lock_),
      cleared_references_(Locks::reference_queue_cleared_references_lock_) {
}

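// The slow-path flag is stored on the java.lang.ref.Reference class itself; while it is set,
// Reference.getReferent() calls fall back to the runtime and reach GetReferent() below.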
void ReferenceProcessor::EnableSlowPath() {
  mirror::Reference::GetJavaLangRefReference()->SetSlowPath(true);
}

void ReferenceProcessor::DisableSlowPath(Thread* self) {
  mirror::Reference::GetJavaLangRefReference()->SetSlowPath(false);
  condition_.Broadcast(self);
}

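// Wake up threads blocked in GetReferent() or WaitUntilDoneProcessingReferences().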
void ReferenceProcessor::BroadcastForSlowPath(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  condition_.Broadcast(self);
}

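// Slow path of Reference.getReferent(): return the referent only once it is known to be safe
// to expose to the mutator, blocking while reference processing may still clear or move it.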
ObjPtr<mirror::Object> ReferenceProcessor::GetReferent(Thread* self,
                                                       ObjPtr<mirror::Reference> reference) {
  if (!kUseReadBarrier || self->GetWeakRefAccessEnabled()) {
    // Under read barrier / concurrent copying collector, it's not safe to call GetReferent() when
    // weak ref access is disabled as the call includes a read barrier which may push a ref onto
    // the mark stack and interfere with termination of marking.
    ObjPtr<mirror::Object> const referent = reference->GetReferent();
    // If the referent is null then it is already cleared; we can just return null since there is
    // no scenario where it becomes non-null during the reference processing phase.
    if (UNLIKELY(!SlowPathEnabled()) || referent == nullptr) {
      return referent;
    }
  }
  MutexLock mu(self, *Locks::reference_processor_lock_);
  while ((!kUseReadBarrier && SlowPathEnabled()) ||
         (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
    ObjPtr<mirror::Object> referent = reference->GetReferent<kWithoutReadBarrier>();
    // If the referent became cleared, return it. Don't need barrier since thread roots can't get
    // updated until after we leave the function due to holding the mutator lock.
    if (referent == nullptr) {
      return nullptr;
    }
    // Try to see if the referent is already marked by asking the collector (IsMarked). We can
    // return it to the mutator as long as the GC is not preserving references.
    if (LIKELY(collector_ != nullptr)) {
      // If it's null it means not marked, but it could become marked if the referent is reachable
      // by finalizer referents. So we cannot return in this case and must block. Otherwise, we
      // can return it to the mutator as long as the GC is not preserving references, in which
      // case only black nodes can be safely returned. If the GC is preserving references, the
      // mutator could take a white field from a grey or white node and move it somewhere else
      // in the heap causing corruption since this field would get swept.
      // Use the cached referent instead of calling GetReferent since other threads could call
      // Reference.clear() after we did the null check resulting in a null pointer being
      // incorrectly passed to IsMarked. b/33569625
      ObjPtr<mirror::Object> forwarded_ref = collector_->IsMarked(referent.Ptr());
      if (forwarded_ref != nullptr) {
        // Non-null means that it is marked.
        if (!preserving_references_ ||
            (LIKELY(!reference->IsFinalizerReferenceInstance()) && reference->IsUnprocessed())) {
          return forwarded_ref;
        }
      }
    }
    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
    // presence of threads blocking for weak ref access.
    self->CheckEmptyCheckpointFromWeakRefAccess(Locks::reference_processor_lock_);
    condition_.WaitHoldingLocks(self);
  }
  return reference->GetReferent();
}

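// Bracket the phases in which white referents may still be preserved (marked); while
// preserving_references_ is set, GetReferent() above will usually keep blocking.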
void ReferenceProcessor::StartPreservingReferences(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  preserving_references_ = true;
}

void ReferenceProcessor::StopPreservingReferences(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  preserving_references_ = false;
  // We are done preserving references; threads that are blocked may now see a marked referent.
  condition_.Broadcast(self);
}

// Process reference class instances and schedule finalizations.
void ReferenceProcessor::ProcessReferences(bool concurrent,
                                           TimingLogger* timings,
                                           bool clear_soft_references,
                                           collector::GarbageCollector* collector) {
  TimingLogger::ScopedTiming t(concurrent ? __FUNCTION__ : "(Paused)ProcessReferences", timings);
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    collector_ = collector;
    if (!kUseReadBarrier) {
      CHECK_EQ(SlowPathEnabled(), concurrent) << "Slow path must be enabled iff concurrent";
    } else {
      // Weak ref access is enabled at Zygote compaction by SemiSpace (concurrent == false).
      CHECK_EQ(!self->GetWeakRefAccessEnabled(), concurrent);
    }
  }
  if (kIsDebugBuild && collector->IsTransactionActive()) {
    // In transaction mode, we shouldn't enqueue any Reference to the queues.
    // See DelayReferenceReferent().
    DCHECK(soft_reference_queue_.IsEmpty());
    DCHECK(weak_reference_queue_.IsEmpty());
    DCHECK(finalizer_reference_queue_.IsEmpty());
    DCHECK(phantom_reference_queue_.IsEmpty());
  }
  // Unless required to clear soft references with white references, preserve some white referents.
  if (!clear_soft_references) {
    TimingLogger::ScopedTiming split(concurrent ? "ForwardSoftReferences" :
        "(Paused)ForwardSoftReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    // TODO: Add smarter logic for preserving soft references. The behavior should be a conditional
    // mark if the SoftReference is supposed to be preserved.
    soft_reference_queue_.ForwardSoftReferences(collector);
    collector->ProcessMarkStack();
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all remaining soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  {
    TimingLogger::ScopedTiming t2(concurrent ? "EnqueueFinalizerReferences" :
        "(Paused)EnqueueFinalizerReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    // Preserve all white objects with finalize methods and schedule them for finalization.
    finalizer_reference_queue_.EnqueueFinalizerReferences(&cleared_references_, collector);
    collector->ProcessMarkStack();
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all finalizer referent reachable soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  // Clear all phantom references with white referents.
  phantom_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  // At this point all reference queues other than the cleared references should be empty.
  DCHECK(soft_reference_queue_.IsEmpty());
  DCHECK(weak_reference_queue_.IsEmpty());
  DCHECK(finalizer_reference_queue_.IsEmpty());
  DCHECK(phantom_reference_queue_.IsEmpty());
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    // Need to always do this since the next GC may be concurrent. Doing this for only concurrent
    // could result in a stale collector_ being used for reference processing since there is a
    // small window of time where slow_path_enabled_ is set but the collector isn't yet installed.
    collector_ = nullptr;
    if (!kUseReadBarrier && concurrent) {
      // Done processing, disable the slow path and broadcast to the waiters.
      DisableSlowPath(self);
    }
  }
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void ReferenceProcessor::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                                                ObjPtr<mirror::Reference> ref,
                                                collector::GarbageCollector* collector) {
  // klass can be the class of the old object if the visitor already updated the class of ref.
  DCHECK(klass != nullptr);
  DCHECK(klass->IsTypeOfReferenceClass());
  mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
  // do_atomic_update needs to be true because this happens outside of the reference processing
  // phase.
  if (!collector->IsNullOrMarkedHeapReference(referent, /*do_atomic_update*/true)) {
    if (UNLIKELY(collector->IsTransactionActive())) {
      // In transaction mode, keep the referent alive and avoid any reference processing to avoid
      // the issue of rolling back reference processing. do_atomic_update needs to be true because
      // this happens outside of the reference processing phase.
      if (!referent->IsNull()) {
        collector->MarkHeapReference(referent, /*do_atomic_update*/ true);
      }
      return;
    }
    Thread* self = Thread::Current();
    // TODO: Remove these locks, and use atomic stacks for storing references?
    // We need to check that the references haven't already been enqueued since we can end up
    // scanning the same reference multiple times due to dirty cards.
    if (klass->IsSoftReferenceClass()) {
      soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsWeakReferenceClass()) {
      weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsFinalizerReferenceClass()) {
      finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsPhantomReferenceClass()) {
      phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else {
      LOG(FATAL) << "Invalid reference type " << klass->PrettyClass() << " " << std::hex
                 << klass->GetAccessFlags();
    }
  }
}

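// Update the GC root of the cleared references list via the visitor, e.g. after objects have
// been moved by a compacting collector.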
void ReferenceProcessor::UpdateRoots(IsMarkedVisitor* visitor) {
  cleared_references_.UpdateRoots(visitor);
}

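// Heap task that passes a global reference to the cleared reference list to
// java.lang.ref.ReferenceQueue.add() and then deletes the global reference.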
class ClearedReferenceTask : public HeapTask {
 public:
  explicit ClearedReferenceTask(jobject cleared_references)
      : HeapTask(NanoTime()), cleared_references_(cleared_references) {
  }
  virtual void Run(Thread* thread) {
    ScopedObjectAccess soa(thread);
    jvalue args[1];
    args[0].l = cleared_references_;
    InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_ReferenceQueue_add, args);
    soa.Env()->DeleteGlobalRef(cleared_references_);
  }

 private:
  const jobject cleared_references_;
};

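// Hand the cleared references over to ReferenceQueue.add(), either synchronously or, when
// kAsyncReferenceQueueAdd is set, via the heap's TaskProcessor.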
void ReferenceProcessor::EnqueueClearedReferences(Thread* self) {
  Locks::mutator_lock_->AssertNotHeld(self);
  // When a runtime isn't started there are no reference queues to care about so ignore.
  if (!cleared_references_.IsEmpty()) {
    if (LIKELY(Runtime::Current()->IsStarted())) {
      jobject cleared_references;
      {
        ReaderMutexLock mu(self, *Locks::mutator_lock_);
        cleared_references = self->GetJniEnv()->vm->AddGlobalRef(
            self, cleared_references_.GetList());
      }
      if (kAsyncReferenceQueueAdd) {
        // TODO: This can cause RunFinalization to terminate before newly freed objects are
        // finalized since they may not be enqueued by the time RunFinalization starts.
        Runtime::Current()->GetHeap()->GetTaskProcessor()->AddTask(
            self, new ClearedReferenceTask(cleared_references));
      } else {
        ClearedReferenceTask task(cleared_references);
        task.Run(self);
      }
    }
    cleared_references_.Clear();
  }
}

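// Clear a reference's referent on behalf of the mutator (e.g. a Reference.clear() call),
// respecting any active transaction.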
void ReferenceProcessor::ClearReferent(ObjPtr<mirror::Reference> ref) {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::reference_processor_lock_);
  // Need to wait until reference processing is done since IsMarkedHeapReference does not have a
  // CAS. If we do not wait, it can result in the GC un-clearing references due to race conditions.
  // This also handles the race where the referent gets cleared after a null check but before
  // IsMarkedHeapReference is called.
  WaitUntilDoneProcessingReferences(self);
  if (Runtime::Current()->IsActiveTransaction()) {
    ref->ClearReferent<true>();
  } else {
    ref->ClearReferent<false>();
  }
}

void ReferenceProcessor::WaitUntilDoneProcessingReferences(Thread* self) {
  // Wait until we are done processing references.
  while ((!kUseReadBarrier && SlowPathEnabled()) ||
         (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
    // presence of threads blocking for weak ref access.
    self->CheckEmptyCheckpointFromWeakRefAccess(Locks::reference_processor_lock_);
    condition_.WaitHoldingLocks(self);
  }
}

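// If the finalizer reference is still unprocessed, mark it as enqueued (under the appropriate
// locks) by making its pendingNext point to itself; returns true on success, false if it was
// already enqueued.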
bool ReferenceProcessor::MakeCircularListIfUnenqueued(
    ObjPtr<mirror::FinalizerReference> reference) {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::reference_processor_lock_);
  WaitUntilDoneProcessingReferences(self);
  // At this point, since the sentinel of the reference is live, it is guaranteed to not be
  // enqueued if we just finished processing references. Otherwise, we may be doing the main GC
  // phase. Since we are holding the reference processor lock, it guarantees that reference
  // processing can't begin. The GC could have just enqueued the reference on one of the internal
  // GC queues, but since we hold the finalizer_reference_queue_ lock it also prevents this race.
  MutexLock mu2(self, *Locks::reference_queue_finalizer_references_lock_);
  if (reference->IsUnprocessed()) {
    CHECK(reference->IsFinalizerReferenceInstance());
    reference->SetPendingNext(reference);
    return true;
  }
  return false;
}

}  // namespace gc
}  // namespace art