/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "reference_processor.h"

#include "base/time_utils.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/reference-inl.h"
#include "reference_processor-inl.h"
#include "reflection.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "task_processor.h"
#include "utils.h"
#include "well_known_classes.h"

namespace art {
namespace gc {

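// If true, cleared references are handed to the heap's TaskProcessor and appended to the Java
// ReferenceQueue asynchronously; if false, EnqueueClearedReferences() runs the enqueue task
// inline. See the TODO in EnqueueClearedReferences() for why the asynchronous path is disabled.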
static constexpr bool kAsyncReferenceQueueAdd = false;

ReferenceProcessor::ReferenceProcessor()
    : process_references_args_(nullptr, nullptr, nullptr),
      preserving_references_(false),
      condition_("reference processor condition", *Locks::reference_processor_lock_),
      soft_reference_queue_(Locks::reference_queue_soft_references_lock_),
      weak_reference_queue_(Locks::reference_queue_weak_references_lock_),
      finalizer_reference_queue_(Locks::reference_queue_finalizer_references_lock_),
      phantom_reference_queue_(Locks::reference_queue_phantom_references_lock_),
      cleared_references_(Locks::reference_queue_cleared_references_lock_) {
}

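// The slow path flag lives on the java.lang.ref.Reference class object. While it is set,
// Reference.get() is expected to divert into ReferenceProcessor::GetReferent() below rather than
// read the referent field directly. A minimal sketch of the assumed fast-path check (the real
// check lives in reference_processor-inl.h and the Reference.get() intrinsics):
//
//   if (!mirror::Reference::GetJavaLangRefReference()->GetSlowPathEnabled()) {
//     return ref->GetReferent();  // Fast path: no reference processing in progress.
//   }
//   return heap->GetReferenceProcessor()->GetReferent(self, ref);  // Slow path: may block.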
void ReferenceProcessor::EnableSlowPath() {
  mirror::Reference::GetJavaLangRefReference()->SetSlowPath(true);
}

void ReferenceProcessor::DisableSlowPath(Thread* self) {
  mirror::Reference::GetJavaLangRefReference()->SetSlowPath(false);
  condition_.Broadcast(self);
}

void ReferenceProcessor::BroadcastForSlowPath(Thread* self) {
  CHECK(kUseReadBarrier);
  MutexLock mu(self, *Locks::reference_processor_lock_);
  condition_.Broadcast(self);
}

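// Slow path of Reference.get(). Returns the referent if it is safe to expose to the mutator;
// otherwise blocks on condition_ until either reference processing completes (non read barrier
// case) or weak ref access is re-enabled (read barrier case).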
mirror::Object* ReferenceProcessor::GetReferent(Thread* self, mirror::Reference* reference) {
  if (!kUseReadBarrier || self->GetWeakRefAccessEnabled()) {
    // Under the read barrier / concurrent copying collector, it is not safe to call GetReferent()
    // when weak ref access is disabled, as the call includes a read barrier which may push a ref
    // onto the mark stack and interfere with termination of marking.
    mirror::Object* const referent = reference->GetReferent();
    // If the referent is null then it is already cleared. We can just return null, since there is
    // no scenario in which it becomes non-null during the reference processing phase.
    if (UNLIKELY(!SlowPathEnabled()) || referent == nullptr) {
      return referent;
    }
  }
  MutexLock mu(self, *Locks::reference_processor_lock_);
  while ((!kUseReadBarrier && SlowPathEnabled()) ||
         (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
    mirror::HeapReference<mirror::Object>* const referent_addr =
        reference->GetReferentReferenceAddr();
    // If the referent has been cleared, return null. No barrier is needed since thread roots
    // can't get updated until after we leave the function, due to holding the mutator lock.
    if (referent_addr->AsMirrorPtr() == nullptr) {
      return nullptr;
    }
    // Check whether the referent is already marked by using the is_marked_callback. We can return
    // it to the mutator as long as the GC is not preserving references.
    IsHeapReferenceMarkedCallback* const is_marked_callback =
        process_references_args_.is_marked_callback_;
    if (LIKELY(is_marked_callback != nullptr)) {
      // If the callback reports the referent as not marked, it could still become marked if the
      // referent is reachable through finalizer referents, so we cannot return it and must block.
      // Otherwise, we can return the referent to the mutator as long as the GC is not preserving
      // references, in which case only black nodes can be safely returned. If the GC is
      // preserving references, the mutator could take a white field from a grey or white node and
      // move it somewhere else in the heap, causing corruption since this field would get swept.
      if (is_marked_callback(referent_addr, process_references_args_.arg_)) {
        if (!preserving_references_ ||
            (LIKELY(!reference->IsFinalizerReferenceInstance()) && !reference->IsEnqueued())) {
          return referent_addr->AsMirrorPtr();
        }
      }
    }
    condition_.WaitHoldingLocks(self);
  }
  return reference->GetReferent();
}

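// Forwarding callback used while preserving soft references: marks the referent through the GC's
// mark callback so the softly reachable object survives, then updates the reference to point at
// the (possibly moved) object. Always returns true, i.e. every soft reference processed here is
// preserved.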
bool ReferenceProcessor::PreserveSoftReferenceCallback(mirror::HeapReference<mirror::Object>* obj,
                                                       void* arg) {
  auto* const args = reinterpret_cast<ProcessReferencesArgs*>(arg);
  // TODO: Add smarter logic for preserving soft references.
  mirror::Object* new_obj = args->mark_callback_(obj->AsMirrorPtr(), args->arg_);
  DCHECK(new_obj != nullptr);
  obj->Assign(new_obj);
  return true;
}

void ReferenceProcessor::StartPreservingReferences(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  preserving_references_ = true;
}

void ReferenceProcessor::StopPreservingReferences(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  preserving_references_ = false;
  // We are done preserving references; threads blocked in GetReferent() may now see a marked
  // referent.
  condition_.Broadcast(self);
}

// Process reference class instances and schedule finalizations.
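// The phases, in order, are:
// 1. Forward soft references (unless clear_soft_references is set), marking through their
//    referents so softly reachable objects survive.
// 2. Clear soft and weak references whose referents are still white (unmarked).
// 3. Enqueue finalizer references: preserve white referents that have finalize methods and
//    schedule them for finalization.
// 4. Re-clear soft and weak references whose referents only became reachable via finalizers.
// 5. Clear phantom references with white referents.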
void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timings,
                                           bool clear_soft_references,
                                           IsHeapReferenceMarkedCallback* is_marked_callback,
                                           MarkObjectCallback* mark_object_callback,
                                           ProcessMarkStackCallback* process_mark_stack_callback,
                                           void* arg) {
  TimingLogger::ScopedTiming t(concurrent ? __FUNCTION__ : "(Paused)ProcessReferences", timings);
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    process_references_args_.is_marked_callback_ = is_marked_callback;
    process_references_args_.mark_callback_ = mark_object_callback;
    process_references_args_.arg_ = arg;
    if (!kUseReadBarrier) {
      CHECK_EQ(SlowPathEnabled(), concurrent) << "Slow path must be enabled iff concurrent";
    } else {
      // Weak ref access is enabled at Zygote compaction by SemiSpace (concurrent == false).
      CHECK_EQ(!self->GetWeakRefAccessEnabled(), concurrent);
    }
  }
  // Unless we are required to clear soft references with white referents, preserve some white
  // referents.
  if (!clear_soft_references) {
    TimingLogger::ScopedTiming split(concurrent ? "ForwardSoftReferences" :
        "(Paused)ForwardSoftReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    soft_reference_queue_.ForwardSoftReferences(&PreserveSoftReferenceCallback,
                                                &process_references_args_);
    process_mark_stack_callback(arg);
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all remaining soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  {
    TimingLogger::ScopedTiming t2(concurrent ? "EnqueueFinalizerReferences" :
        "(Paused)EnqueueFinalizerReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    // Preserve all white objects with finalize methods and schedule them for finalization.
    finalizer_reference_queue_.EnqueueFinalizerReferences(&cleared_references_, is_marked_callback,
                                                          mark_object_callback, arg);
    process_mark_stack_callback(arg);
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all soft and weak references with white referents that were only reachable through
  // finalizer referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  // Clear all phantom references with white referents.
  phantom_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  // At this point all reference queues other than the cleared references should be empty.
  DCHECK(soft_reference_queue_.IsEmpty());
  DCHECK(weak_reference_queue_.IsEmpty());
  DCHECK(finalizer_reference_queue_.IsEmpty());
  DCHECK(phantom_reference_queue_.IsEmpty());
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    // Need to always do this, since the next GC may be concurrent. Doing it only for the
    // concurrent case could result in a stale is_marked_callback_ being called before reference
    // processing starts, since there is a small window of time where slow_path_enabled_ is set
    // but the callback isn't yet.
    process_references_args_.is_marked_callback_ = nullptr;
    if (!kUseReadBarrier) {
      if (concurrent) {
        // Done processing, disable the slow path and broadcast to the waiters.
        DisableSlowPath(self);
      }
    }
  }
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void ReferenceProcessor::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
                                                IsHeapReferenceMarkedCallback* is_marked_callback,
                                                void* arg) {
  // klass can be the class of the old object if the visitor already updated the class of ref.
  DCHECK(klass != nullptr);
  DCHECK(klass->IsTypeOfReferenceClass());
  mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
  if (referent->AsMirrorPtr() != nullptr && !is_marked_callback(referent, arg)) {
    Thread* self = Thread::Current();
    // TODO: Remove these locks, and use atomic stacks for storing references?
    // We need to check that the references haven't already been enqueued, since we can end up
    // scanning the same reference multiple times due to dirty cards.
    if (klass->IsSoftReferenceClass()) {
      soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsWeakReferenceClass()) {
      weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsFinalizerReferenceClass()) {
      finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsPhantomReferenceClass()) {
      phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else {
      LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
                 << klass->GetAccessFlags();
    }
  }
}

void ReferenceProcessor::UpdateRoots(IsMarkedCallback* callback, void* arg) {
  cleared_references_.UpdateRoots(callback, arg);
}

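// Heap task that hands a chain of cleared references over to the Java side by calling
// java.lang.ref.ReferenceQueue.add(). The jobject passed in must be a global reference that keeps
// the list alive until the task runs; Run() deletes it when done.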
class ClearedReferenceTask : public HeapTask {
 public:
  explicit ClearedReferenceTask(jobject cleared_references)
      : HeapTask(NanoTime()), cleared_references_(cleared_references) {
  }
  virtual void Run(Thread* thread) {
    ScopedObjectAccess soa(thread);
    jvalue args[1];
    args[0].l = cleared_references_;
    InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_ReferenceQueue_add, args);
    soa.Env()->DeleteGlobalRef(cleared_references_);
  }

 private:
  const jobject cleared_references_;
};

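// Called with the mutator lock released. Briefly reacquires it in shared mode to create a global
// ref to the head of the cleared reference list, then either runs the ClearedReferenceTask inline
// or, if kAsyncReferenceQueueAdd is set, defers it to the heap's TaskProcessor.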
void ReferenceProcessor::EnqueueClearedReferences(Thread* self) {
  Locks::mutator_lock_->AssertNotHeld(self);
  // When the runtime isn't started there are no reference queues to care about, so ignore.
  if (!cleared_references_.IsEmpty()) {
    if (LIKELY(Runtime::Current()->IsStarted())) {
      jobject cleared_references;
      {
        ReaderMutexLock mu(self, *Locks::mutator_lock_);
        cleared_references = self->GetJniEnv()->vm->AddGlobalRef(
            self, cleared_references_.GetList());
      }
      if (kAsyncReferenceQueueAdd) {
        // TODO: This can cause RunFinalization to terminate before newly freed objects are
        // finalized, since they may not be enqueued by the time RunFinalization starts.
        Runtime::Current()->GetHeap()->GetTaskProcessor()->AddTask(
            self, new ClearedReferenceTask(cleared_references));
      } else {
        ClearedReferenceTask task(cleared_references);
        task.Run(self);
      }
    }
    cleared_references_.Clear();
  }
}

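// If the given finalizer reference has not yet been enqueued by the GC, point its pending-next
// field at the reference itself. The resulting one-element circular list makes IsEnqueued() true,
// so the GC's AtomicEnqueueIfNotEnqueued() will treat the reference as already enqueued. Returns
// true if the circular list was created here, false if the reference was already enqueued.
// (Presumably invoked from the java.lang.ref.FinalizerReference native code; the caller is not
// visible in this file.)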
bool ReferenceProcessor::MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference) {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::reference_processor_lock_);
  // Wait until we are done processing references.
  while ((!kUseReadBarrier && SlowPathEnabled()) ||
         (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
    condition_.WaitHoldingLocks(self);
  }
  // At this point, since the sentinel of the reference is live, it is guaranteed not to be
  // enqueued if we just finished processing references. Otherwise, we may be in the main GC
  // phase; since we are holding the reference processor lock, reference processing can't begin.
  // The GC could have just enqueued the reference on one of its internal queues, but since we
  // also hold the finalizer_reference_queue_ lock, that race is prevented as well.
  MutexLock mu2(self, *Locks::reference_queue_finalizer_references_lock_);
  if (!reference->IsEnqueued()) {
    CHECK(reference->IsFinalizerReferenceInstance());
    if (Runtime::Current()->IsActiveTransaction()) {
      reference->SetPendingNext<true>(reference);
    } else {
      reference->SetPendingNext<false>(reference);
    }
    return true;
  }
  return false;
}

}  // namespace gc
}  // namespace art