/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "reference_processor.h"

#include "mirror/object-inl.h"
#include "mirror/reference-inl.h"
#include "reflection.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "well_known_classes.h"

namespace art {
namespace gc {

ReferenceProcessor::ReferenceProcessor()
    : process_references_args_(nullptr, nullptr, nullptr), slow_path_enabled_(false),
      preserving_references_(false), lock_("reference processor lock", kReferenceProcessorLock),
      condition_("reference processor condition", lock_) {
}

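// EnableSlowPath() is called with the mutator lock held exclusively (i.e. during a pause), so any
// mutator that subsequently calls GetReferent() takes the blocking path below until
// DisableSlowPath() broadcasts that reference processing has finished.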
void ReferenceProcessor::EnableSlowPath() {
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  slow_path_enabled_ = true;
}

void ReferenceProcessor::DisableSlowPath(Thread* self) {
  slow_path_enabled_ = false;
  condition_.Broadcast(self);
}

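// Returns the referent of a java.lang.ref.Reference. When the slow path is enabled, the caller
// blocks until the referent is cleared, known to be marked, or reference processing completes.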
mirror::Object* ReferenceProcessor::GetReferent(Thread* self, mirror::Reference* reference) {
  mirror::Object* const referent = reference->GetReferent();
  // If the referent is null then it is already cleared; we can just return null since there is no
  // scenario where it becomes non-null during the reference processing phase.
  if (LIKELY(!slow_path_enabled_) || referent == nullptr) {
    return referent;
  }
  MutexLock mu(self, lock_);
  while (slow_path_enabled_) {
    mirror::HeapReference<mirror::Object>* const referent_addr =
        reference->GetReferentReferenceAddr();
    // If the referent became cleared, return null. No barrier is needed since thread roots can't
    // get updated until after we leave this function, as we hold the mutator lock.
    if (referent_addr->AsMirrorPtr() == nullptr) {
      return nullptr;
    }
    // Check whether the referent is already marked by using the is_marked_callback. We can return
    // it to the mutator as long as the GC is not preserving references.
    IsHeapReferenceMarkedCallback* const is_marked_callback =
        process_references_args_.is_marked_callback_;
    if (LIKELY(is_marked_callback != nullptr)) {
      // If the callback returns false it means not marked, but the referent could still become
      // marked if it is reachable through finalizer referents. So we cannot return it in this
      // case and must block. Otherwise, we can return it to the mutator as long as the GC is not
      // preserving references, in which case only black nodes can be safely returned. If the GC
      // is preserving references, the mutator could take a white field from a grey or white node
      // and move it somewhere else in the heap, causing corruption since this field would get
      // swept.
      if (is_marked_callback(referent_addr, process_references_args_.arg_)) {
        if (!preserving_references_ ||
            (LIKELY(!reference->IsFinalizerReferenceInstance()) && !reference->IsEnqueued())) {
          return referent_addr->AsMirrorPtr();
        }
      }
    }
    condition_.WaitHoldingLocks(self);
  }
  return reference->GetReferent();
}

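// Callback passed to ForwardSoftReferences() when soft references are being preserved: marks the
// soft referent through the GC's mark callback, writes back the (possibly moved) object, and
// reports it as marked by returning true.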
bool ReferenceProcessor::PreserveSoftReferenceCallback(mirror::HeapReference<mirror::Object>* obj,
                                                       void* arg) {
  auto* const args = reinterpret_cast<ProcessReferencesArgs*>(arg);
  // TODO: Add smarter logic for preserving soft references.
  mirror::Object* new_obj = args->mark_callback_(obj->AsMirrorPtr(), args->arg_);
  DCHECK(new_obj != nullptr);
  obj->Assign(new_obj);
  return true;
}

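// preserving_references_ is set around the phases that may still mark through references (soft
// reference forwarding and finalizer enqueueing). While it is set, GetReferent() only returns
// marked referents of non-finalizer, non-enqueued references and blocks otherwise.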
void ReferenceProcessor::StartPreservingReferences(Thread* self) {
  MutexLock mu(self, lock_);
  preserving_references_ = true;
}

void ReferenceProcessor::StopPreservingReferences(Thread* self) {
  MutexLock mu(self, lock_);
  preserving_references_ = false;
  // We are done preserving references; threads blocked in GetReferent may now see a marked
  // referent.
  condition_.Broadcast(self);
}

// Process reference class instances and schedule finalizations.
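// The phases are: forward soft references (unless clearing them), clear soft and weak references
// with white referents, enqueue finalizer references, clear the remaining soft and weak
// references, and finally clear phantom references.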
void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timings,
                                           bool clear_soft_references,
                                           IsHeapReferenceMarkedCallback* is_marked_callback,
                                           MarkObjectCallback* mark_object_callback,
                                           ProcessMarkStackCallback* process_mark_stack_callback,
                                           void* arg) {
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, lock_);
    process_references_args_.is_marked_callback_ = is_marked_callback;
    process_references_args_.mark_callback_ = mark_object_callback;
    process_references_args_.arg_ = arg;
    CHECK_EQ(slow_path_enabled_, concurrent) << "Slow path must be enabled iff concurrent";
  }
  timings->StartSplit(concurrent ? "ProcessReferences" : "(Paused)ProcessReferences");
  // Unless we are required to clear soft references with white referents, preserve some white
  // referents.
  if (!clear_soft_references) {
    TimingLogger::ScopedSplit split(concurrent ? "ForwardSoftReferences" :
        "(Paused)ForwardSoftReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    soft_reference_queue_.ForwardSoftReferences(&PreserveSoftReferenceCallback,
                                                &process_references_args_);
    process_mark_stack_callback(arg);
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all remaining soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  {
    TimingLogger::ScopedSplit split(concurrent ? "EnqueueFinalizerReferences" :
        "(Paused)EnqueueFinalizerReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    // Preserve all white objects with finalize methods and schedule them for finalization.
    finalizer_reference_queue_.EnqueueFinalizerReferences(&cleared_references_, is_marked_callback,
                                                          mark_object_callback, arg);
    process_mark_stack_callback(arg);
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all finalizer-referent-reachable soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  // Clear all phantom references with white referents.
  phantom_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  // At this point all reference queues other than the cleared references should be empty.
  DCHECK(soft_reference_queue_.IsEmpty());
  DCHECK(weak_reference_queue_.IsEmpty());
  DCHECK(finalizer_reference_queue_.IsEmpty());
  DCHECK(phantom_reference_queue_.IsEmpty());
  {
    MutexLock mu(self, lock_);
    // We always need to do this since the next GC may be concurrent. Clearing the callback only
    // for concurrent GCs could result in a stale is_marked_callback_ being called before
    // reference processing starts, since there is a small window of time where
    // slow_path_enabled_ is set but the callback isn't yet updated.
    process_references_args_.is_marked_callback_ = nullptr;
    if (concurrent) {
      // Done processing, disable the slow path and broadcast to the waiters.
      DisableSlowPath(self);
    }
  }
  timings->EndSplit();
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void ReferenceProcessor::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
                                                IsHeapReferenceMarkedCallback* is_marked_callback,
                                                void* arg) {
  // klass can be the class of the old object if the visitor already updated the class of ref.
  DCHECK(klass != nullptr);
  DCHECK(klass->IsReferenceClass());
  mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
  if (referent->AsMirrorPtr() != nullptr && !is_marked_callback(referent, arg)) {
    Thread* self = Thread::Current();
    // TODO: Remove these locks, and use atomic stacks for storing references?
    // We need to check that the references haven't already been enqueued since we can end up
    // scanning the same reference multiple times due to dirty cards.
    if (klass->IsSoftReferenceClass()) {
      soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsWeakReferenceClass()) {
      weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsFinalizerReferenceClass()) {
      finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsPhantomReferenceClass()) {
      phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else {
      LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
                 << klass->GetAccessFlags();
    }
  }
}

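// Visits and updates the roots held by the cleared references queue (for example after objects
// have been moved by the GC).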
void ReferenceProcessor::UpdateRoots(IsMarkedCallback* callback, void* arg) {
  cleared_references_.UpdateRoots(callback, arg);
}

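// Hands the cleared references over to the Java-level java.lang.ref.ReferenceQueue by calling
// ReferenceQueue.add(). Must be called without the mutator lock held since it invokes Java code.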
void ReferenceProcessor::EnqueueClearedReferences(Thread* self) {
  Locks::mutator_lock_->AssertNotHeld(self);
  if (!cleared_references_.IsEmpty()) {
    // When a runtime isn't started there are no reference queues to care about so ignore.
    if (LIKELY(Runtime::Current()->IsStarted())) {
      ScopedObjectAccess soa(self);
      ScopedLocalRef<jobject> arg(self->GetJniEnv(),
                                  soa.AddLocalReference<jobject>(cleared_references_.GetList()));
      jvalue args[1];
      args[0].l = arg.get();
      InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_ReferenceQueue_add, args);
    }
    cleared_references_.Clear();
  }
}

}  // namespace gc
}  // namespace art