/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "reference_processor.h"

#include "base/time_utils.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/reference-inl.h"
#include "reference_processor-inl.h"
#include "reflection.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "task_processor.h"
#include "utils.h"
#include "well_known_classes.h"

namespace art {
namespace gc {

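// When true, cleared references are handed to the Java-side ReferenceQueue via a background
// HeapTask instead of synchronously on the thread that finished the GC. Currently disabled; see
// the TODO in EnqueueClearedReferences() about RunFinalization racing with asynchronous adds.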
static constexpr bool kAsyncReferenceQueueAdd = false;

ReferenceProcessor::ReferenceProcessor()
    : process_references_args_(nullptr, nullptr, nullptr),
      preserving_references_(false),
      condition_("reference processor condition", *Locks::reference_processor_lock_),
      soft_reference_queue_(Locks::reference_queue_soft_references_lock_),
      weak_reference_queue_(Locks::reference_queue_weak_references_lock_),
      finalizer_reference_queue_(Locks::reference_queue_finalizer_references_lock_),
      phantom_reference_queue_(Locks::reference_queue_phantom_references_lock_),
      cleared_references_(Locks::reference_queue_cleared_references_lock_) {
}

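// The slow-path flag lives in the java.lang.ref.Reference class object. While it is set,
// Reference.getReferent() takes the GetReferent() slow path below, which may block until
// reference processing completes. It is enabled while a concurrent GC processes references;
// DisableSlowPath() also wakes any mutators blocked in GetReferent().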
void ReferenceProcessor::EnableSlowPath() {
  mirror::Reference::GetJavaLangRefReference()->SetSlowPath(true);
}

void ReferenceProcessor::DisableSlowPath(Thread* self) {
  mirror::Reference::GetJavaLangRefReference()->SetSlowPath(false);
  condition_.Broadcast(self);
}

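// Slow path of Reference.getReferent(): returns the referent once it is safe to expose to the
// mutator, blocking on condition_ while reference processing is in flight and the referent's
// fate is still undecided.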
mirror::Object* ReferenceProcessor::GetReferent(Thread* self, mirror::Reference* reference) {
  mirror::Object* const referent = reference->GetReferent();
  // If the referent is null then it is already cleared; we can just return null since there is no
  // scenario where it becomes non-null during the reference processing phase.
  if (UNLIKELY(!SlowPathEnabled()) || referent == nullptr) {
    return referent;
  }
  MutexLock mu(self, *Locks::reference_processor_lock_);
  while (SlowPathEnabled()) {
    mirror::HeapReference<mirror::Object>* const referent_addr =
        reference->GetReferentReferenceAddr();
    // If the referent became cleared, return it. No barrier is needed since thread roots can't
    // get updated until after we leave the function, due to holding the mutator lock.
    if (referent_addr->AsMirrorPtr() == nullptr) {
      return nullptr;
    }
    // Check whether the referent is already marked by using the is_marked_callback. We can
    // return it to the mutator as long as the GC is not preserving references.
    IsHeapReferenceMarkedCallback* const is_marked_callback =
        process_references_args_.is_marked_callback_;
    if (LIKELY(is_marked_callback != nullptr)) {
      // If the referent is not marked, it could still become marked if it is reachable via
      // finalizer referents, so we cannot return in that case and must block. Otherwise, we
      // can return it to the mutator as long as the GC is not preserving references, in which
      // case only black nodes can be safely returned. If the GC is preserving references, the
      // mutator could take a white field from a grey or white node and move it somewhere else
      // in the heap, causing corruption since this field would get swept.
      if (is_marked_callback(referent_addr, process_references_args_.arg_)) {
        if (!preserving_references_ ||
            (LIKELY(!reference->IsFinalizerReferenceInstance()) && !reference->IsEnqueued())) {
          return referent_addr->AsMirrorPtr();
        }
      }
    }
    condition_.WaitHoldingLocks(self);
  }
  return reference->GetReferent();
}

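// Invoked by ReferenceQueue::ForwardSoftReferences() for each soft reference being preserved:
// marks the referent (keeping the softly reachable object alive) and updates the reference
// field in case the object moved.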
bool ReferenceProcessor::PreserveSoftReferenceCallback(mirror::HeapReference<mirror::Object>* obj,
                                                       void* arg) {
  auto* const args = reinterpret_cast<ProcessReferencesArgs*>(arg);
  // TODO: Add smarter logic for preserving soft references.
  mirror::Object* new_obj = args->mark_callback_(obj->AsMirrorPtr(), args->arg_);
  DCHECK(new_obj != nullptr);
  obj->Assign(new_obj);
  return true;
}

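// While preserving_references_ is true, GetReferent() withholds the referents of finalizer
// references and of already-enqueued references even when they are marked; see the white-field
// discussion in GetReferent().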
void ReferenceProcessor::StartPreservingReferences(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  preserving_references_ = true;
}

void ReferenceProcessor::StopPreservingReferences(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  preserving_references_ = false;
  // We are done preserving references; blocked waiters may now see a marked referent.
  condition_.Broadcast(self);
}

// Process reference class instances and schedule finalizations.
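// The phases are: (1) unless soft references are being cleared, forward/preserve soft
// references so softly reachable objects survive; (2) clear soft and weak references with
// still-white referents; (3) enqueue finalizer references, marking their referents so they
// survive until finalization; (4) clear soft and weak references discovered while marking
// finalizer referents; (5) clear phantom references with white referents.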
void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timings,
                                           bool clear_soft_references,
                                           IsHeapReferenceMarkedCallback* is_marked_callback,
                                           MarkObjectCallback* mark_object_callback,
                                           ProcessMarkStackCallback* process_mark_stack_callback,
                                           void* arg) {
  TimingLogger::ScopedTiming t(concurrent ? __FUNCTION__ : "(Paused)ProcessReferences", timings);
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    process_references_args_.is_marked_callback_ = is_marked_callback;
    process_references_args_.mark_callback_ = mark_object_callback;
    process_references_args_.arg_ = arg;
    CHECK_EQ(SlowPathEnabled(), concurrent) << "Slow path must be enabled iff concurrent";
  }
  // Unless required to clear soft references with white references, preserve some white referents.
  if (!clear_soft_references) {
    TimingLogger::ScopedTiming split(concurrent ? "ForwardSoftReferences" :
        "(Paused)ForwardSoftReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    soft_reference_queue_.ForwardSoftReferences(&PreserveSoftReferenceCallback,
                                                &process_references_args_);
    process_mark_stack_callback(arg);
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all remaining soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  {
    TimingLogger::ScopedTiming t2(concurrent ? "EnqueueFinalizerReferences" :
        "(Paused)EnqueueFinalizerReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    // Preserve all white objects with finalize methods and schedule them for finalization.
    finalizer_reference_queue_.EnqueueFinalizerReferences(&cleared_references_, is_marked_callback,
                                                          mark_object_callback, arg);
    process_mark_stack_callback(arg);
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
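  // Marking the finalizer referents above can discover additional soft and weak references that
  // are reachable only through finalizable objects; those were queued while processing the mark
  // stack, which is why the white-reference clearing below runs a second time.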
| 164 | // Clear all finalizer referent reachable soft and weak references with white referents. |
Mathieu Chartier | 308351a | 2014-06-15 12:39:02 -0700 | [diff] [blame] | 165 | soft_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg); |
| 166 | weak_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg); |
Mathieu Chartier | 78f7b4c | 2014-05-06 10:57:27 -0700 | [diff] [blame] | 167 | // Clear all phantom references with white referents. |
Mathieu Chartier | 308351a | 2014-06-15 12:39:02 -0700 | [diff] [blame] | 168 | phantom_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg); |
Mathieu Chartier | 78f7b4c | 2014-05-06 10:57:27 -0700 | [diff] [blame] | 169 | // At this point all reference queues other than the cleared references should be empty. |
| 170 | DCHECK(soft_reference_queue_.IsEmpty()); |
| 171 | DCHECK(weak_reference_queue_.IsEmpty()); |
| 172 | DCHECK(finalizer_reference_queue_.IsEmpty()); |
| 173 | DCHECK(phantom_reference_queue_.IsEmpty()); |
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    // Need to always do this since the next GC may be concurrent. Doing this for only concurrent
    // GCs could result in a stale is_marked_callback_ being called before reference processing
    // starts, since there is a small window of time where slow_path_enabled_ is enabled but the
    // callback isn't yet set.
    process_references_args_.is_marked_callback_ = nullptr;
    if (concurrent) {
      // Done processing, disable the slow path and broadcast to the waiters.
      DisableSlowPath(self);
    }
  }
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void ReferenceProcessor::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
                                                IsHeapReferenceMarkedCallback* is_marked_callback,
                                                void* arg) {
  // klass can be the class of the old object if the visitor already updated the class of ref.
  DCHECK(klass != nullptr);
  DCHECK(klass->IsTypeOfReferenceClass());
  mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
  if (referent->AsMirrorPtr() != nullptr && !is_marked_callback(referent, arg)) {
    Thread* self = Thread::Current();
    // TODO: Remove these locks, and use atomic stacks for storing references?
    // We need to check that the references haven't already been enqueued since we can end up
    // scanning the same reference multiple times due to dirty cards.
    if (klass->IsSoftReferenceClass()) {
      soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsWeakReferenceClass()) {
      weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsFinalizerReferenceClass()) {
      finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsPhantomReferenceClass()) {
      phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else {
      LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
                 << klass->GetAccessFlags();
    }
  }
}

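// Lets a moving GC update the roots of the cleared references list after relocating objects.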
void ReferenceProcessor::UpdateRoots(IsMarkedCallback* callback, void* arg) {
  cleared_references_.UpdateRoots(callback, arg);
}

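// HeapTask that passes a list of cleared references to the Java side via
// java.lang.ref.ReferenceQueue.add() and then releases the global reference that kept the
// list alive.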
class ClearedReferenceTask : public HeapTask {
 public:
  explicit ClearedReferenceTask(jobject cleared_references)
      : HeapTask(NanoTime()), cleared_references_(cleared_references) {
  }
  virtual void Run(Thread* thread) {
    ScopedObjectAccess soa(thread);
    jvalue args[1];
    args[0].l = cleared_references_;
    InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_ReferenceQueue_add, args);
    soa.Env()->DeleteGlobalRef(cleared_references_);
  }

 private:
  const jobject cleared_references_;
};

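// Hands any cleared references over to the Java ReferenceQueue, either synchronously on the
// calling thread or (if kAsyncReferenceQueueAdd) on the heap's task processor.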
void ReferenceProcessor::EnqueueClearedReferences(Thread* self) {
  Locks::mutator_lock_->AssertNotHeld(self);
  // When a runtime isn't started there are no reference queues to care about, so ignore.
  if (!cleared_references_.IsEmpty()) {
    if (LIKELY(Runtime::Current()->IsStarted())) {
      jobject cleared_references;
      {
        ReaderMutexLock mu(self, *Locks::mutator_lock_);
        cleared_references = self->GetJniEnv()->vm->AddGlobalRef(
            self, cleared_references_.GetList());
      }
      if (kAsyncReferenceQueueAdd) {
        // TODO: This can cause RunFinalization to terminate before newly freed objects are
        // finalized since they may not be enqueued by the time RunFinalization starts.
        Runtime::Current()->GetHeap()->GetTaskProcessor()->AddTask(
            self, new ClearedReferenceTask(cleared_references));
      } else {
        ClearedReferenceTask task(cleared_references);
        task.Run(self);
      }
    }
    cleared_references_.Clear();
  }
}

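// If the given finalizer reference is not already enqueued, makes it a circular list of one by
// pointing its pendingNext at itself (which marks it as enqueued) and returns true. Waits for
// any in-progress reference processing first so the decision cannot race with the GC.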
bool ReferenceProcessor::MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference) {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::reference_processor_lock_);
  // Wait until we are done processing references.
  while (SlowPathEnabled()) {
    condition_.WaitHoldingLocks(self);
  }
  // At this point, since the sentinel of the reference is live, it is guaranteed to not be
  // enqueued if we just finished processing references. Otherwise, we may be in the main GC
  // phase. Since we are holding the reference processor lock, reference processing can't begin.
  // The GC could have just enqueued the reference on one of the internal GC queues, but since we
  // also hold the finalizer_reference_queue_ lock, that race is prevented as well.
  MutexLock mu2(self, *Locks::reference_queue_finalizer_references_lock_);
  if (!reference->IsEnqueued()) {
    CHECK(reference->IsFinalizerReferenceInstance());
    if (Runtime::Current()->IsActiveTransaction()) {
      reference->SetPendingNext<true>(reference);
    } else {
      reference->SetPendingNext<false>(reference);
    }
    return true;
  }
  return false;
}

}  // namespace gc
}  // namespace art