/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "reference_processor.h"

#include "base/time_utils.h"
#include "collector/garbage_collector.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/reference-inl.h"
#include "reference_processor-inl.h"
#include "reflection.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change-inl.h"
#include "task_processor.h"
#include "utils.h"
#include "well_known_classes.h"

namespace art {
namespace gc {

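// If true, hand cleared references to the heap's TaskProcessor so that ReferenceQueue.add runs
// asynchronously; if false, run it synchronously on the thread calling
// EnqueueClearedReferences().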
static constexpr bool kAsyncReferenceQueueAdd = false;

ReferenceProcessor::ReferenceProcessor()
    : collector_(nullptr),
      preserving_references_(false),
      condition_("reference processor condition", *Locks::reference_processor_lock_),
      soft_reference_queue_(Locks::reference_queue_soft_references_lock_),
      weak_reference_queue_(Locks::reference_queue_weak_references_lock_),
      finalizer_reference_queue_(Locks::reference_queue_finalizer_references_lock_),
      phantom_reference_queue_(Locks::reference_queue_phantom_references_lock_),
      cleared_references_(Locks::reference_queue_cleared_references_lock_) {
}

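// Enabling the slow path on the java.lang.ref.Reference class makes Reference.get() call into
// the runtime (GetReferent() below) instead of loading the referent field directly.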
void ReferenceProcessor::EnableSlowPath() {
  mirror::Reference::GetJavaLangRefReference()->SetSlowPath(true);
}

void ReferenceProcessor::DisableSlowPath(Thread* self) {
  mirror::Reference::GetJavaLangRefReference()->SetSlowPath(false);
  condition_.Broadcast(self);
}

void ReferenceProcessor::BroadcastForSlowPath(Thread* self) {
  CHECK(kUseReadBarrier);
  MutexLock mu(self, *Locks::reference_processor_lock_);
  condition_.Broadcast(self);
}

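// Slow path for Reference.get(): return the referent immediately when it is safe to do so;
// otherwise block until reference processing completes or weak ref access is re-enabled.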
ObjPtr<mirror::Object> ReferenceProcessor::GetReferent(Thread* self,
                                                       ObjPtr<mirror::Reference> reference) {
  if (!kUseReadBarrier || self->GetWeakRefAccessEnabled()) {
    // Under the read barrier / concurrent copying collector, it's not safe to call GetReferent()
    // when weak ref access is disabled, as the call includes a read barrier which may push a ref
    // onto the mark stack and interfere with termination of marking.
    ObjPtr<mirror::Object> const referent = reference->GetReferent();
    // If the referent is null then it is already cleared. We can just return null since there is
    // no scenario where it becomes non-null during the reference processing phase.
    if (UNLIKELY(!SlowPathEnabled()) || referent == nullptr) {
      return referent;
    }
  }
  MutexLock mu(self, *Locks::reference_processor_lock_);
  while ((!kUseReadBarrier && SlowPathEnabled()) ||
         (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
    mirror::HeapReference<mirror::Object>* const referent_addr =
        reference->GetReferentReferenceAddr();
    // If the referent became cleared, return null. No barrier is needed since thread roots can't
    // get updated until after we leave the function, due to holding the mutator lock.
    if (referent_addr->AsMirrorPtr() == nullptr) {
      return nullptr;
    }
    // Check whether the referent is already marked by asking the collector. We can return it to
    // the mutator as long as the GC is not preserving references.
    if (LIKELY(collector_ != nullptr)) {
      // If the referent is not yet marked, it may still become marked if it is reachable through
      // a finalizer referent, so we cannot return it and must block. Otherwise, we can return it
      // to the mutator as long as the GC is not preserving references, in which case only black
      // nodes can be safely returned. If the GC is preserving references, the mutator could take
      // a white field from a grey or white node and move it somewhere else in the heap, causing
      // corruption since this field would get swept.
      if (collector_->IsMarkedHeapReference(referent_addr)) {
        if (!preserving_references_ ||
            (LIKELY(!reference->IsFinalizerReferenceInstance()) && reference->IsUnprocessed())) {
          return referent_addr->AsMirrorPtr();
        }
      }
    }
    condition_.WaitHoldingLocks(self);
  }
  return reference->GetReferent();
}

void ReferenceProcessor::StartPreservingReferences(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  preserving_references_ = true;
}

void ReferenceProcessor::StopPreservingReferences(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  preserving_references_ = false;
  // We are done preserving references; threads blocked in GetReferent() may now see a marked
  // referent.
  condition_.Broadcast(self);
}

// Process reference class instances and schedule finalizations.
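// Processing proceeds in phases: preserve reachable SoftReferences, clear white soft and weak
// references, enqueue finalizer references (preserving their referents), clear soft and weak
// references that are still white afterwards, and finally clear white phantom references.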
void ReferenceProcessor::ProcessReferences(bool concurrent,
                                           TimingLogger* timings,
                                           bool clear_soft_references,
                                           collector::GarbageCollector* collector) {
  TimingLogger::ScopedTiming t(concurrent ? __FUNCTION__ : "(Paused)ProcessReferences", timings);
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    collector_ = collector;
    if (!kUseReadBarrier) {
      CHECK_EQ(SlowPathEnabled(), concurrent) << "Slow path must be enabled iff concurrent";
    } else {
      // Weak ref access is enabled at Zygote compaction by SemiSpace (concurrent == false).
      CHECK_EQ(!self->GetWeakRefAccessEnabled(), concurrent);
    }
  }
  // Unless we are required to clear soft references with white referents, preserve some of them.
  if (!clear_soft_references) {
    TimingLogger::ScopedTiming split(concurrent ? "ForwardSoftReferences" :
        "(Paused)ForwardSoftReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    // TODO: Add smarter logic for preserving soft references. The behavior should be a
    // conditional mark if the SoftReference is supposed to be preserved.
    soft_reference_queue_.ForwardSoftReferences(collector);
    collector->ProcessMarkStack();
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all remaining soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  {
    TimingLogger::ScopedTiming t2(concurrent ? "EnqueueFinalizerReferences" :
        "(Paused)EnqueueFinalizerReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    // Preserve all white objects with finalize methods and schedule them for finalization.
    finalizer_reference_queue_.EnqueueFinalizerReferences(&cleared_references_, collector);
    collector->ProcessMarkStack();
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all finalizer-referent-reachable soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  // Clear all phantom references with white referents.
  phantom_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  // At this point all reference queues other than the cleared references should be empty.
  DCHECK(soft_reference_queue_.IsEmpty());
  DCHECK(weak_reference_queue_.IsEmpty());
  DCHECK(finalizer_reference_queue_.IsEmpty());
  DCHECK(phantom_reference_queue_.IsEmpty());
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    // Need to always do this since the next GC may be concurrent. Resetting only for concurrent
    // GCs could result in a stale collector_ being used by GetReferent() before reference
    // processing starts, since there is a small window of time where the slow path is enabled
    // but collector_ isn't yet set.
    collector_ = nullptr;
    if (!kUseReadBarrier && concurrent) {
      // Done processing, disable the slow path and broadcast to the waiters.
      DisableSlowPath(self);
    }
  }
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void ReferenceProcessor::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                                                ObjPtr<mirror::Reference> ref,
                                                collector::GarbageCollector* collector) {
  // klass can be the class of the old object if the visitor already updated the class of ref.
  DCHECK(klass != nullptr);
  DCHECK(klass->IsTypeOfReferenceClass());
  mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
  if (referent->AsMirrorPtr() != nullptr && !collector->IsMarkedHeapReference(referent)) {
    Thread* self = Thread::Current();
    // TODO: Remove these locks, and use atomic stacks for storing references?
    // We need to check that the references haven't already been enqueued since we can end up
    // scanning the same reference multiple times due to dirty cards.
    if (klass->IsSoftReferenceClass()) {
      soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsWeakReferenceClass()) {
      weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsFinalizerReferenceClass()) {
      finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsPhantomReferenceClass()) {
      phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else {
      LOG(FATAL) << "Invalid reference type " << klass->PrettyClass() << " " << std::hex
                 << klass->GetAccessFlags();
    }
  }
}

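// Used by moving collectors to update the root of the cleared references list after objects
// have been moved.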
void ReferenceProcessor::UpdateRoots(IsMarkedVisitor* visitor) {
  cleared_references_.UpdateRoots(visitor);
}

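// Task that passes the list of cleared references to java.lang.ref.ReferenceQueue.add() and then
// deletes the global reference that kept the list alive.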
class ClearedReferenceTask : public HeapTask {
 public:
  explicit ClearedReferenceTask(jobject cleared_references)
      : HeapTask(NanoTime()), cleared_references_(cleared_references) {
  }
  virtual void Run(Thread* thread) {
    ScopedObjectAccess soa(thread);
    jvalue args[1];
    args[0].l = cleared_references_;
    InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_ReferenceQueue_add, args);
    soa.Env()->DeleteGlobalRef(cleared_references_);
  }

 private:
  const jobject cleared_references_;
};

void ReferenceProcessor::EnqueueClearedReferences(Thread* self) {
  Locks::mutator_lock_->AssertNotHeld(self);
  // When a runtime isn't started there are no reference queues to care about, so ignore.
  if (!cleared_references_.IsEmpty()) {
    if (LIKELY(Runtime::Current()->IsStarted())) {
      jobject cleared_references;
      {
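        // Creating the global ref requires the mutator lock since it accesses the cleared
        // reference list object.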
        ReaderMutexLock mu(self, *Locks::mutator_lock_);
        cleared_references = self->GetJniEnv()->vm->AddGlobalRef(
            self, cleared_references_.GetList());
      }
      if (kAsyncReferenceQueueAdd) {
        // TODO: This can cause RunFinalization to terminate before newly freed objects are
        // finalized since they may not be enqueued by the time RunFinalization starts.
        Runtime::Current()->GetHeap()->GetTaskProcessor()->AddTask(
            self, new ClearedReferenceTask(cleared_references));
      } else {
        ClearedReferenceTask task(cleared_references);
        task.Run(self);
      }
    }
    cleared_references_.Clear();
  }
}

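// Marks an unenqueued finalizer reference as processed by pointing its pendingNext field at
// itself (a one-element circular list). Returns true if this was done, false if the reference
// had already been enqueued.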
bool ReferenceProcessor::MakeCircularListIfUnenqueued(
    ObjPtr<mirror::FinalizerReference> reference) {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::reference_processor_lock_);
  // Wait until we are done processing references.
  while ((!kUseReadBarrier && SlowPathEnabled()) ||
         (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
    condition_.WaitHoldingLocks(self);
  }
  // At this point, since the sentinel of the reference is live, it is guaranteed not to be
  // enqueued if we just finished processing references. Otherwise, we may be in the middle of
  // the main GC phase. Since we hold the reference processor lock, reference processing can't
  // begin. The GC could have just enqueued the reference on one of its internal queues, but
  // holding the finalizer_reference_queue_ lock prevents that race as well.
  MutexLock mu2(self, *Locks::reference_queue_finalizer_references_lock_);
  if (reference->IsUnprocessed()) {
    CHECK(reference->IsFinalizerReferenceInstance());
    reference->SetPendingNext(reference);
    return true;
  }
  return false;
}

}  // namespace gc
}  // namespace art