/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "reference_processor.h"

#include "base/time_utils.h"
#include "collector/garbage_collector.h"
#include "java_vm_ext.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/reference-inl.h"
#include "reference_processor-inl.h"
#include "reflection.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change-inl.h"
#include "task_processor.h"
#include "utils.h"
#include "well_known_classes.h"

namespace art {
namespace gc {

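// If true, cleared references are handed to the Java side asynchronously via the heap's
// TaskProcessor instead of being enqueued synchronously; see EnqueueClearedReferences() below.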
static constexpr bool kAsyncReferenceQueueAdd = false;

ReferenceProcessor::ReferenceProcessor()
    : collector_(nullptr),
      preserving_references_(false),
      condition_("reference processor condition", *Locks::reference_processor_lock_),
      soft_reference_queue_(Locks::reference_queue_soft_references_lock_),
      weak_reference_queue_(Locks::reference_queue_weak_references_lock_),
      finalizer_reference_queue_(Locks::reference_queue_finalizer_references_lock_),
      phantom_reference_queue_(Locks::reference_queue_phantom_references_lock_),
      cleared_references_(Locks::reference_queue_cleared_references_lock_) {
}

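// Slow-path interplay with Reference.get(), in sketch form: while the slow-path flag is set
// on the java.lang.ref.Reference class, mutators calling Reference.get() fall through to
// GetReferent() below and may block on condition_. The GC-side ordering for a concurrent
// collection is roughly as follows (the exact call sites live in the heap/collector code,
// so treat this as an illustrative sketch rather than verbatim call sites):
//
//   EnableSlowPath();                       // Mutators now take the slow path.
//   /* ...concurrent marking... */
//   ProcessReferences(/*concurrent*/ true, timings, clear_soft_references, collector);
//   // ProcessReferences() calls DisableSlowPath() itself when concurrent, which
//   // broadcasts condition_ and unblocks any waiting mutators.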
void ReferenceProcessor::EnableSlowPath() {
  mirror::Reference::GetJavaLangRefReference()->SetSlowPath(true);
}

void ReferenceProcessor::DisableSlowPath(Thread* self) {
  mirror::Reference::GetJavaLangRefReference()->SetSlowPath(false);
  condition_.Broadcast(self);
}

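// Wakes threads blocked in GetReferent() or WaitUntilDoneProcessingReferences() without
// touching the slow-path flag, so they re-check their wait condition. The expected caller
// is GC code that has just re-enabled weak reference access (e.g. the concurrent copying
// collector); this is inferred from the wait loops below, not a guarantee of this file.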
void ReferenceProcessor::BroadcastForSlowPath(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  condition_.Broadcast(self);
}

ObjPtr<mirror::Object> ReferenceProcessor::GetReferent(Thread* self,
                                                       ObjPtr<mirror::Reference> reference) {
  if (!kUseReadBarrier || self->GetWeakRefAccessEnabled()) {
    // Under read barrier / concurrent copying collector, it's not safe to call GetReferent() when
    // weak ref access is disabled as the call includes a read barrier which may push a ref onto
    // the mark stack and interfere with termination of marking.
    ObjPtr<mirror::Object> const referent = reference->GetReferent();
    // If the referent is null then it is already cleared, we can just return null since there is
    // no scenario where it becomes non-null during the reference processing phase.
    if (UNLIKELY(!SlowPathEnabled()) || referent == nullptr) {
      return referent;
    }
  }
  MutexLock mu(self, *Locks::reference_processor_lock_);
  while ((!kUseReadBarrier && SlowPathEnabled()) ||
         (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
    ObjPtr<mirror::Object> referent = reference->GetReferent<kWithoutReadBarrier>();
    // If the referent became cleared, return it. Don't need barrier since thread roots can't get
    // updated until after we leave the function due to holding the mutator lock.
    if (referent == nullptr) {
      return nullptr;
    }
    // Try to see if the referent is already marked by asking the collector (IsMarked). We can
    // return it to the mutator as long as the GC is not preserving references.
    if (LIKELY(collector_ != nullptr)) {
      // If it's null it means not marked, but it could become marked if the referent is reachable
      // by finalizer referents. So we cannot return in this case and must block. Otherwise, we
      // can return it to the mutator as long as the GC is not preserving references, in which
      // case only black nodes can be safely returned. If the GC is preserving references, the
      // mutator could take a white field from a grey or white node and move it somewhere else
      // in the heap causing corruption since this field would get swept.
      // Use the cached referent instead of calling GetReferent since other threads could call
      // Reference.clear() after we did the null check resulting in a null pointer being
      // incorrectly passed to IsMarked. b/33569625
      ObjPtr<mirror::Object> forwarded_ref = collector_->IsMarked(referent.Ptr());
      if (forwarded_ref != nullptr) {
        // Non-null means that it is marked.
        if (!preserving_references_ ||
            (LIKELY(!reference->IsFinalizerReferenceInstance()) && reference->IsUnprocessed())) {
          return forwarded_ref;
        }
      }
    }
    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
    // presence of threads blocking for weak ref access.
    self->CheckEmptyCheckpointFromWeakRefAccess(Locks::reference_processor_lock_);
    condition_.WaitHoldingLocks(self);
  }
  return reference->GetReferent();
}

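// Start/StopPreservingReferences bracket the GC phases that may still re-mark white referents
// (soft reference forwarding and finalizer enqueueing). While preserving_references_ is set,
// GetReferent() above refuses to hand out referents that could still be re-marked or swept,
// and blocks instead.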
void ReferenceProcessor::StartPreservingReferences(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  preserving_references_ = true;
}

void ReferenceProcessor::StopPreservingReferences(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  preserving_references_ = false;
  // We are done preserving references; threads that are blocked may now see a marked referent.
  condition_.Broadcast(self);
}

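// The phase ordering implemented below, for reference:
//   1. Forward soft references with reachable referents (skipped when clearing soft refs).
//   2. Clear soft and weak references whose referents are still white (unmarked).
//   3. Enqueue finalizer references, re-marking their referents for finalization.
//   4. Clear soft/weak references newly kept alive only through finalizers, then phantoms.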
// Process reference class instances and schedule finalizations.
void ReferenceProcessor::ProcessReferences(bool concurrent,
                                           TimingLogger* timings,
                                           bool clear_soft_references,
                                           collector::GarbageCollector* collector) {
  TimingLogger::ScopedTiming t(concurrent ? __FUNCTION__ : "(Paused)ProcessReferences", timings);
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    collector_ = collector;
    if (!kUseReadBarrier) {
      CHECK_EQ(SlowPathEnabled(), concurrent) << "Slow path must be enabled iff concurrent";
    } else {
      // Weak ref access is enabled at Zygote compaction by SemiSpace (concurrent == false).
      CHECK_EQ(!self->GetWeakRefAccessEnabled(), concurrent);
    }
  }
  if (kIsDebugBuild && collector->IsTransactionActive()) {
    // In transaction mode, we shouldn't enqueue any Reference to the queues.
    // See DelayReferenceReferent().
    DCHECK(soft_reference_queue_.IsEmpty());
    DCHECK(weak_reference_queue_.IsEmpty());
    DCHECK(finalizer_reference_queue_.IsEmpty());
    DCHECK(phantom_reference_queue_.IsEmpty());
  }
  // Unless required to clear soft references with white references, preserve some white referents.
  if (!clear_soft_references) {
    TimingLogger::ScopedTiming split(concurrent ? "ForwardSoftReferences" :
        "(Paused)ForwardSoftReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    // TODO: Add smarter logic for preserving soft references. The behavior should be a conditional
    // mark if the SoftReference is supposed to be preserved.
    soft_reference_queue_.ForwardSoftReferences(collector);
    collector->ProcessMarkStack();
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all remaining soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  {
    TimingLogger::ScopedTiming t2(concurrent ? "EnqueueFinalizerReferences" :
        "(Paused)EnqueueFinalizerReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    // Preserve all white objects with finalize methods and schedule them for finalization.
    finalizer_reference_queue_.EnqueueFinalizerReferences(&cleared_references_, collector);
    collector->ProcessMarkStack();
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all finalizer referent reachable soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  // Clear all phantom references with white referents.
  phantom_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  // At this point all reference queues other than the cleared references should be empty.
  DCHECK(soft_reference_queue_.IsEmpty());
  DCHECK(weak_reference_queue_.IsEmpty());
  DCHECK(finalizer_reference_queue_.IsEmpty());
  DCHECK(phantom_reference_queue_.IsEmpty());
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    // Need to always do this since the next GC may be concurrent. Doing this for only concurrent
    // could result in a stale collector_ being used during the next round of reference processing,
    // since there is a small window of time where the slow path is enabled but collector_ isn't
    // yet set.
    collector_ = nullptr;
    if (!kUseReadBarrier && concurrent) {
      // Done processing, disable the slow path and broadcast to the waiters.
      DisableSlowPath(self);
    }
  }
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void ReferenceProcessor::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                                                ObjPtr<mirror::Reference> ref,
                                                collector::GarbageCollector* collector) {
  // klass can be the class of the old object if the visitor already updated the class of ref.
  DCHECK(klass != nullptr);
  DCHECK(klass->IsTypeOfReferenceClass());
  mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
  // do_atomic_update needs to be true because this happens outside of the reference processing
  // phase.
  if (!collector->IsNullOrMarkedHeapReference(referent, /*do_atomic_update*/ true)) {
    if (UNLIKELY(collector->IsTransactionActive())) {
      // In transaction mode, keep the referent alive and skip reference processing entirely so
      // that there is nothing to roll back. do_atomic_update needs to be true because this
      // happens outside of the reference processing phase.
      if (!referent->IsNull()) {
        collector->MarkHeapReference(referent, /*do_atomic_update*/ true);
      }
      return;
    }
    Thread* self = Thread::Current();
    // TODO: Remove these locks, and use atomic stacks for storing references?
    // We need to check that the references haven't already been enqueued since we can end up
    // scanning the same reference multiple times due to dirty cards.
    if (klass->IsSoftReferenceClass()) {
      soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsWeakReferenceClass()) {
      weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsFinalizerReferenceClass()) {
      finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsPhantomReferenceClass()) {
      phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else {
      LOG(FATAL) << "Invalid reference type " << klass->PrettyClass() << " " << std::hex
                 << klass->GetAccessFlags();
    }
  }
}

void ReferenceProcessor::UpdateRoots(IsMarkedVisitor* visitor) {
  cleared_references_.UpdateRoots(visitor);
}

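// Heap task that hands the chain of cleared references to the Java side by invoking the
// static libcore helper java.lang.ref.ReferenceQueue.add(). The global ref taken in
// EnqueueClearedReferences() keeps the list alive until the task has run, after which it
// is deleted here.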
class ClearedReferenceTask : public HeapTask {
 public:
  explicit ClearedReferenceTask(jobject cleared_references)
      : HeapTask(NanoTime()), cleared_references_(cleared_references) {
  }
  virtual void Run(Thread* thread) {
    ScopedObjectAccess soa(thread);
    jvalue args[1];
    args[0].l = cleared_references_;
    InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_ReferenceQueue_add, args);
    soa.Env()->DeleteGlobalRef(cleared_references_);
  }

 private:
  const jobject cleared_references_;
};

void ReferenceProcessor::EnqueueClearedReferences(Thread* self) {
  Locks::mutator_lock_->AssertNotHeld(self);
  // When a runtime isn't started there are no reference queues to care about, so ignore.
  if (!cleared_references_.IsEmpty()) {
    if (LIKELY(Runtime::Current()->IsStarted())) {
      jobject cleared_references;
      {
        ReaderMutexLock mu(self, *Locks::mutator_lock_);
        cleared_references = self->GetJniEnv()->vm->AddGlobalRef(
            self, cleared_references_.GetList());
      }
      if (kAsyncReferenceQueueAdd) {
        // TODO: This can cause RunFinalization to terminate before newly freed objects are
        // finalized since they may not be enqueued by the time RunFinalization starts.
        Runtime::Current()->GetHeap()->GetTaskProcessor()->AddTask(
            self, new ClearedReferenceTask(cleared_references));
      } else {
        ClearedReferenceTask task(cleared_references);
        task.Run(self);
      }
    }
    cleared_references_.Clear();
  }
}

void ReferenceProcessor::ClearReferent(ObjPtr<mirror::Reference> ref) {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::reference_processor_lock_);
  // Need to wait until reference processing is done since IsMarkedHeapReference does not have a
  // CAS. If we do not wait, it can result in the GC un-clearing references due to race conditions.
  // This also handles the race where the referent gets cleared after a null check but before
  // IsMarkedHeapReference is called.
  WaitUntilDoneProcessingReferences(self);
  if (Runtime::Current()->IsActiveTransaction()) {
    ref->ClearReferent<true>();
  } else {
    ref->ClearReferent<false>();
  }
}

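// Blocks until reference processing is quiescent: until the slow path has been disabled again
// (non-read-barrier collectors), or until this thread's weak reference access has been
// re-enabled (concurrent copying).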
void ReferenceProcessor::WaitUntilDoneProcessingReferences(Thread* self) {
  // Wait until we are done processing references.
  while ((!kUseReadBarrier && SlowPathEnabled()) ||
         (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
    // presence of threads blocking for weak ref access.
    self->CheckEmptyCheckpointFromWeakRefAccess(Locks::reference_processor_lock_);
    condition_.WaitHoldingLocks(self);
  }
}

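// If the finalizer reference is still unprocessed, point its pending-next at itself (a
// one-element circular list) so the GC will not enqueue it, and return true; return false if
// it has already been enqueued. The caller is assumed to be the FinalizerReference native
// bookkeeping on the Java side; this file only guarantees the behavior described above.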
bool ReferenceProcessor::MakeCircularListIfUnenqueued(
    ObjPtr<mirror::FinalizerReference> reference) {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::reference_processor_lock_);
  WaitUntilDoneProcessingReferences(self);
  // At this point, since the sentinel of the reference is live, it is guaranteed to not be
  // enqueued if we just finished processing references. Otherwise, we may be doing the main GC
  // phase. Since we are holding the reference processor lock, it guarantees that reference
  // processing can't begin. The GC could have just enqueued the reference on one of the internal
  // GC queues, but since we hold the finalizer_reference_queue_ lock it also prevents this
  // race.
  MutexLock mu2(self, *Locks::reference_queue_finalizer_references_lock_);
  if (reference->IsUnprocessed()) {
    CHECK(reference->IsFinalizerReferenceInstance());
    reference->SetPendingNext(reference);
    return true;
  }
  return false;
}

}  // namespace gc
}  // namespace art