/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "reference_processor.h"

#include "base/time_utils.h"
#include "collector/garbage_collector.h"
#include "java_vm_ext.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/reference-inl.h"
#include "nativehelper/scoped_local_ref.h"
#include "object_callbacks.h"
#include "reference_processor-inl.h"
#include "reflection.h"
#include "scoped_thread_state_change-inl.h"
#include "task_processor.h"
#include "utils.h"
#include "well_known_classes.h"

namespace art {
namespace gc {

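// If true, cleared references are enqueued on the Java side asynchronously via the heap's
// TaskProcessor; if false, EnqueueClearedReferences() runs the enqueue task synchronously on the
// calling thread. See EnqueueClearedReferences() below.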
static constexpr bool kAsyncReferenceQueueAdd = false;

ReferenceProcessor::ReferenceProcessor()
    : collector_(nullptr),
      preserving_references_(false),
      condition_("reference processor condition", *Locks::reference_processor_lock_),
      soft_reference_queue_(Locks::reference_queue_soft_references_lock_),
      weak_reference_queue_(Locks::reference_queue_weak_references_lock_),
      finalizer_reference_queue_(Locks::reference_queue_finalizer_references_lock_),
      phantom_reference_queue_(Locks::reference_queue_phantom_references_lock_),
      cleared_references_(Locks::reference_queue_cleared_references_lock_) {
}

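// Setting the slow-path flag on the java.lang.ref.Reference class makes Reference.get() call into
// the runtime (eventually ReferenceProcessor::GetReferent()) instead of reading the referent
// field directly, which lets the runtime block mutators while references are being processed.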
void ReferenceProcessor::EnableSlowPath() {
  mirror::Reference::GetJavaLangRefReference()->SetSlowPath(true);
}

void ReferenceProcessor::DisableSlowPath(Thread* self) {
  mirror::Reference::GetJavaLangRefReference()->SetSlowPath(false);
  condition_.Broadcast(self);
}

void ReferenceProcessor::BroadcastForSlowPath(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  condition_.Broadcast(self);
}

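// Slow path for Reference.get(): decides whether the referent may be handed to the mutator
// immediately, or whether the caller must block until reference processing finishes.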
ObjPtr<mirror::Object> ReferenceProcessor::GetReferent(Thread* self,
                                                       ObjPtr<mirror::Reference> reference) {
  if (!kUseReadBarrier || self->GetWeakRefAccessEnabled()) {
    // Under the read barrier / concurrent copying collector, it's not safe to call GetReferent()
    // when weak ref access is disabled, as the call includes a read barrier which may push a ref
    // onto the mark stack and interfere with termination of marking.
    ObjPtr<mirror::Object> const referent = reference->GetReferent();
    // If the referent is null then it is already cleared; we can just return null since there is
    // no scenario where it becomes non-null during the reference processing phase.
    if (UNLIKELY(!SlowPathEnabled()) || referent == nullptr) {
      return referent;
    }
  }
  MutexLock mu(self, *Locks::reference_processor_lock_);
  while ((!kUseReadBarrier && SlowPathEnabled()) ||
         (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
    ObjPtr<mirror::Object> referent = reference->GetReferent<kWithoutReadBarrier>();
    // If the referent became cleared, return it. No barrier is needed since thread roots can't
    // get updated until after we leave the function, because we hold the mutator lock.
    if (referent == nullptr) {
      return nullptr;
    }
    // Try to see if the referent is already marked by asking the collector. We can return it to
    // the mutator as long as the GC is not preserving references.
    if (LIKELY(collector_ != nullptr)) {
      // If IsMarked() returns null, the referent is not marked yet, but it could become marked if
      // it is reachable from finalizer referents. So we cannot return in this case and must
      // block. Otherwise, we can return it to the mutator as long as the GC is not preserving
      // references, in which case only black nodes can be safely returned. If the GC is
      // preserving references, the mutator could take a white field from a grey or white node and
      // move it somewhere else in the heap, causing corruption since this field would get swept.
      // Use the cached referent instead of calling GetReferent since other threads could call
      // Reference.clear() after we did the null check, resulting in a null pointer being
      // incorrectly passed to IsMarked. b/33569625
      ObjPtr<mirror::Object> forwarded_ref = collector_->IsMarked(referent.Ptr());
      if (forwarded_ref != nullptr) {
        // Non-null means that it is marked.
        if (!preserving_references_ ||
            (LIKELY(!reference->IsFinalizerReferenceInstance()) && reference->IsUnprocessed())) {
          return forwarded_ref;
        }
      }
    }
    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
    // presence of threads blocking for weak ref access.
    self->CheckEmptyCheckpointFromWeakRefAccess(Locks::reference_processor_lock_);
    condition_.WaitHoldingLocks(self);
  }
  return reference->GetReferent();
}

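// While preserving_references_ is set, GetReferent() will not hand out a referent merely because
// it is marked: for finalizer references, and for references the GC has already begun processing,
// a marked referent may still be moved or cleared, so callers block instead.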
void ReferenceProcessor::StartPreservingReferences(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  preserving_references_ = true;
}

void ReferenceProcessor::StopPreservingReferences(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  preserving_references_ = false;
  // We are done preserving references; some threads that were blocked may now see a marked
  // referent.
  condition_.Broadcast(self);
}

// Process reference class instances and schedule finalizations.
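// The steps below implement the Java reference semantics in order: preserve softly reachable
// objects (unless clearing soft references), clear soft and weak references with white (unmarked)
// referents, mark and enqueue finalizable objects, clear soft and weak references that were only
// reachable through finalizers, and finally clear phantom references.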
void ReferenceProcessor::ProcessReferences(bool concurrent,
                                           TimingLogger* timings,
                                           bool clear_soft_references,
                                           collector::GarbageCollector* collector) {
  TimingLogger::ScopedTiming t(concurrent ? __FUNCTION__ : "(Paused)ProcessReferences", timings);
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    collector_ = collector;
    if (!kUseReadBarrier) {
      CHECK_EQ(SlowPathEnabled(), concurrent) << "Slow path must be enabled iff concurrent";
    } else {
      // Weak ref access is enabled at Zygote compaction by SemiSpace (concurrent == false).
      CHECK_EQ(!self->GetWeakRefAccessEnabled(), concurrent);
    }
  }
  if (kIsDebugBuild && collector->IsTransactionActive()) {
    // In transaction mode, we shouldn't enqueue any Reference to the queues.
    // See DelayReferenceReferent().
    DCHECK(soft_reference_queue_.IsEmpty());
    DCHECK(weak_reference_queue_.IsEmpty());
    DCHECK(finalizer_reference_queue_.IsEmpty());
    DCHECK(phantom_reference_queue_.IsEmpty());
  }
  // Unless required to clear soft references with white references, preserve some white referents.
  if (!clear_soft_references) {
    TimingLogger::ScopedTiming split(concurrent ? "ForwardSoftReferences" :
        "(Paused)ForwardSoftReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    // TODO: Add smarter logic for preserving soft references. The behavior should be a
    // conditional mark if the SoftReference is supposed to be preserved.
    soft_reference_queue_.ForwardSoftReferences(collector);
    collector->ProcessMarkStack();
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all remaining soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  {
    TimingLogger::ScopedTiming t2(concurrent ? "EnqueueFinalizerReferences" :
        "(Paused)EnqueueFinalizerReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    // Preserve all white objects with finalize methods and schedule them for finalization.
    finalizer_reference_queue_.EnqueueFinalizerReferences(&cleared_references_, collector);
    collector->ProcessMarkStack();
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all finalizer-referent-reachable soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  // Clear all phantom references with white referents.
  phantom_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  // At this point all reference queues other than the cleared references should be empty.
  DCHECK(soft_reference_queue_.IsEmpty());
  DCHECK(weak_reference_queue_.IsEmpty());
  DCHECK(finalizer_reference_queue_.IsEmpty());
  DCHECK(phantom_reference_queue_.IsEmpty());
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    // Need to always do this since the next GC may be concurrent. Doing this only for concurrent
    // GCs could result in a stale collector_ being used before reference processing starts, since
    // there is a small window of time where the slow path is enabled but collector_ isn't yet
    // set.
    collector_ = nullptr;
    if (!kUseReadBarrier && concurrent) {
      // Done processing, disable the slow path and broadcast to the waiters.
      DisableSlowPath(self);
    }
  }
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
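// Called by the GC while marking, when its reference visitor encounters an instance of a
// java.lang.ref.Reference subclass.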
void ReferenceProcessor::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                                                ObjPtr<mirror::Reference> ref,
                                                collector::GarbageCollector* collector) {
  // klass can be the class of the old object if the visitor already updated the class of ref.
  DCHECK(klass != nullptr);
  DCHECK(klass->IsTypeOfReferenceClass());
  mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
  // do_atomic_update needs to be true because this happens outside of the reference processing
  // phase.
  if (!collector->IsNullOrMarkedHeapReference(referent, /*do_atomic_update*/true)) {
    if (UNLIKELY(collector->IsTransactionActive())) {
      // In transaction mode, keep the referent alive and avoid any reference processing to avoid
      // the issue of rolling back reference processing. do_atomic_update needs to be true because
      // this happens outside of the reference processing phase.
      if (!referent->IsNull()) {
        collector->MarkHeapReference(referent, /*do_atomic_update*/ true);
      }
      return;
    }
    Thread* self = Thread::Current();
    // TODO: Remove these locks, and use atomic stacks for storing references?
    // We need to check that the references haven't already been enqueued since we can end up
    // scanning the same reference multiple times due to dirty cards.
    if (klass->IsSoftReferenceClass()) {
      soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsWeakReferenceClass()) {
      weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsFinalizerReferenceClass()) {
      finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsPhantomReferenceClass()) {
      phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else {
      LOG(FATAL) << "Invalid reference type " << klass->PrettyClass() << " " << std::hex
                 << klass->GetAccessFlags();
    }
  }
}

void ReferenceProcessor::UpdateRoots(IsMarkedVisitor* visitor) {
  cleared_references_.UpdateRoots(visitor);
}

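// HeapTask that hands a list of cleared references to the Java side by invoking
// java.lang.ref.ReferenceQueue.add(). The global ref passed to the constructor is released once
// the call completes.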
class ClearedReferenceTask : public HeapTask {
 public:
  explicit ClearedReferenceTask(jobject cleared_references)
      : HeapTask(NanoTime()), cleared_references_(cleared_references) {
  }
  virtual void Run(Thread* thread) {
    ScopedObjectAccess soa(thread);
    jvalue args[1];
    args[0].l = cleared_references_;
    InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_ReferenceQueue_add, args);
    soa.Env()->DeleteGlobalRef(cleared_references_);
  }

 private:
  const jobject cleared_references_;
};

void ReferenceProcessor::EnqueueClearedReferences(Thread* self) {
  Locks::mutator_lock_->AssertNotHeld(self);
  // When the runtime isn't started, there are no reference queues to care about, so ignore.
  if (!cleared_references_.IsEmpty()) {
    if (LIKELY(Runtime::Current()->IsStarted())) {
      jobject cleared_references;
      {
        ReaderMutexLock mu(self, *Locks::mutator_lock_);
        cleared_references = self->GetJniEnv()->vm->AddGlobalRef(
            self, cleared_references_.GetList());
      }
      if (kAsyncReferenceQueueAdd) {
        // TODO: This can cause RunFinalization to terminate before newly freed objects are
        // finalized since they may not be enqueued by the time RunFinalization starts.
        Runtime::Current()->GetHeap()->GetTaskProcessor()->AddTask(
            self, new ClearedReferenceTask(cleared_references));
      } else {
        ClearedReferenceTask task(cleared_references);
        task.Run(self);
      }
    }
    cleared_references_.Clear();
  }
}

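// Used by the native path of Reference.clear(): clearing must not race with the GC's reference
// processing, so wait until processing is done before writing null. (The Java-side entry point
// lives outside this file.)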
void ReferenceProcessor::ClearReferent(ObjPtr<mirror::Reference> ref) {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::reference_processor_lock_);
  // Need to wait until reference processing is done since IsMarkedHeapReference does not have a
  // CAS. If we do not wait, it can result in the GC un-clearing references due to race
  // conditions. This also handles the race where the referent gets cleared after a null check but
  // before IsMarkedHeapReference is called.
  WaitUntilDoneProcessingReferences(self);
  if (Runtime::Current()->IsActiveTransaction()) {
    ref->ClearReferent<true>();
  } else {
    ref->ClearReferent<false>();
  }
}

void ReferenceProcessor::WaitUntilDoneProcessingReferences(Thread* self) {
  // Wait until we are done processing references.
  while ((!kUseReadBarrier && SlowPathEnabled()) ||
         (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
    // presence of threads blocking for weak ref access.
    self->CheckEmptyCheckpointFromWeakRefAccess(Locks::reference_processor_lock_);
    condition_.WaitHoldingLocks(self);
  }
}

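// If the FinalizerReference has not yet been processed by the GC, mark it as enqueued by making
// its pending-next list circular, and report whether we did so. This appears to back the
// Java-side runFinalization machinery; the caller lives outside this file.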
bool ReferenceProcessor::MakeCircularListIfUnenqueued(
    ObjPtr<mirror::FinalizerReference> reference) {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::reference_processor_lock_);
  WaitUntilDoneProcessingReferences(self);
  // At this point, since the sentinel of the reference is live, it is guaranteed to not be
  // enqueued if we just finished processing references. Otherwise, we may be in the middle of the
  // main GC phase. Since we are holding the reference processor lock, reference processing can't
  // begin. The GC could have just enqueued the reference on one of the internal GC queues, but
  // holding the finalizer_reference_queue_ lock also prevents this race.
  MutexLock mu2(self, *Locks::reference_queue_finalizer_references_lock_);
  if (reference->IsUnprocessed()) {
    CHECK(reference->IsFinalizerReferenceInstance());
    reference->SetPendingNext(reference);
    return true;
  }
  return false;
}

}  // namespace gc
}  // namespace art