/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "lambda/box_table.h"

#include "base/mutex.h"
#include "common_throws.h"
#include "gc_root-inl.h"
#include "lambda/closure.h"
#include "lambda/leaking_allocator.h"
#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "thread.h"

#include <vector>

namespace art {
namespace lambda {
// Temporarily represent the lambda Closure as its raw bytes in an array.
// TODO: Generate a proxy class for the closure when boxing the first time.
using BoxedClosurePointerType = mirror::ByteArray*;

static mirror::Class* GetBoxedClosureClass() SHARED_REQUIRES(Locks::mutator_lock_) {
  return mirror::ByteArray::GetArrayClass();
}

namespace {
// Convenience functions for allocating and deleting the box table's copies of the closures.
struct ClosureAllocator {
  // Deletes a Closure that was allocated through ::Allocate.
  static void Delete(Closure* ptr) {
    delete[] reinterpret_cast<char*>(ptr);
  }

  // Returns a well-aligned pointer to a newly allocated Closure on the 'new' heap.
  static Closure* Allocate(size_t size) {
    DCHECK_GE(size, sizeof(Closure));

    // TODO: Maybe point to the interior of the boxed closure object after we add proxy support?
    Closure* closure = reinterpret_cast<Closure*>(new char[size]);
    DCHECK_ALIGNED(closure, alignof(Closure));
    return closure;
  }
};
}  // namespace

BoxTable::BoxTable()
    : allow_new_weaks_(true),
      new_weaks_condition_("lambda box table allowed weaks", *Locks::lambda_table_lock_) {}

BoxTable::~BoxTable() {
  // Free all the copies of our closures.
  for (auto map_iterator = map_.begin(); map_iterator != map_.end(); ) {
    std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;

    Closure* closure = key_value_pair.first;

    // Remove the entry from the map first, so that the map never holds a dangling pointer.
    map_iterator = map_.Erase(map_iterator);

    // Now it is safe to delete the closure.
    ClosureAllocator::Delete(closure);
  }
}

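// Boxes the closure into a managed object (currently a byte[] containing the raw closure
// bytes). Boxing the same closure repeatedly returns the same object, preserving
// referential equality.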
mirror::Object* BoxTable::BoxLambda(const ClosureType& closure) {
  Thread* self = Thread::Current();

  {
    // TODO: Switch to ReaderMutexLock if ConditionVariable ever supports RW Mutexes
    /*Reader*/MutexLock mu(self, *Locks::lambda_table_lock_);
    BlockUntilWeaksAllowed();

    // Attempt to look up this closure; it may have been boxed previously.
    // If so, we *must* return the same object as before to maintain
    // referential equality.
    //
    // In managed code:
    //   Functional f = () -> 5;  // vF = create-lambda
    //   Object a = f;            // vA = box-lambda vF
    //   Object b = f;            // vB = box-lambda vF
    //   assert(a == b)
    ValueType value = FindBoxedLambda(closure);
    if (!value.IsNull()) {
      return value.Read();
    }

    // Otherwise we need to box the closure ourselves and insert it into the hash map.
  }

  // The lambda table lock has been released here, so thread suspension is allowed.

  // Convert the Closure into a managed byte[] which will serve
  // as the temporary 'boxed' version of the lambda. This is good enough
  // to check all the basic object identities that a boxed lambda must retain.
  // It's also good enough to contain all the captured primitive variables.

  // TODO: Boxing an innate lambda (i.e. made with create-lambda) should make a proxy class
  // TODO: Boxing a learned lambda (i.e. made with unbox-lambda) should return the original object
  BoxedClosurePointerType closure_as_array_object =
      mirror::ByteArray::Alloc(self, closure->GetSize());

  // There are no thread suspension points after this, so we don't need to put it into a handle.

  if (UNLIKELY(closure_as_array_object == nullptr)) {
    // Most likely an OOM has occurred.
    CHECK(self->IsExceptionPending());
    return nullptr;
  }

  // Write the raw closure data into the byte[].
  closure->CopyTo(closure_as_array_object->GetRawData(sizeof(uint8_t),  // component size
                                                      0 /*index*/),
                  closure_as_array_object->GetLength());

  // The closure has been successfully boxed into an object; now insert it into the hash map.
  {
    MutexLock mu(self, *Locks::lambda_table_lock_);
    BlockUntilWeaksAllowed();

    // Look up the closure again; another thread may have boxed it while
    // we were allocating the object above.
    ValueType value = FindBoxedLambda(closure);
    if (UNLIKELY(!value.IsNull())) {
      // Let the GC clean up our now-unused closure_as_array_object at a later time.
      return value.Read();
    }

    // Otherwise we need to insert it into the hash map in this thread.

    // Make a copy for the box table to keep, in case the closure gets collected from the stack.
    // TODO: GC may need to sweep for roots in the box table's copy of the closure.
    Closure* closure_table_copy = ClosureAllocator::Allocate(closure->GetSize());
    closure->CopyTo(closure_table_copy, closure->GetSize());

    // The closure_table_copy must be deleted manually when we erase it from the map.

    // Actually insert into the table.
    map_.Insert({closure_table_copy, ValueType(closure_as_array_object)});
  }

  return closure_as_array_object;
}

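// Unboxes a previously boxed lambda: checks that the object is a byte[] box and copies its
// raw bytes into a freshly allocated Closure. On failure, a ClassCastException is raised
// and false is returned.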
bool BoxTable::UnboxLambda(mirror::Object* object, ClosureType* out_closure) {
  DCHECK(object != nullptr);
  *out_closure = nullptr;

  Thread* self = Thread::Current();

  // Note that we do not need to take lambda_table_lock_ here
  // since we don't need to look at the map.

  mirror::Object* boxed_closure_object = object;

  // Raise ClassCastException if the object is not an instance of byte[].
  if (UNLIKELY(!boxed_closure_object->InstanceOf(GetBoxedClosureClass()))) {
    ThrowClassCastException(GetBoxedClosureClass(), boxed_closure_object->GetClass());
    return false;
  }

  // TODO(iam): We must check that the closure object extends/implements the type
  // specified in [type id]. This is not currently implemented since it's always a byte[].

  // If we got this far, the inputs are valid.
  // Turn the byte[] back into a raw closure: allocate a new Closure, copy the data in, and return it.
  BoxedClosurePointerType boxed_closure_as_array =
      down_cast<BoxedClosurePointerType>(boxed_closure_object);

  const int8_t* unaligned_interior_closure = boxed_closure_as_array->GetData();

  // Allocate a copy that can "escape" and copy the closure data into that.
  Closure* unboxed_closure =
      LeakingAllocator::MakeFlexibleInstance<Closure>(self, boxed_closure_as_array->GetLength());
  // TODO: don't just memcpy the closure, it's unsafe when we add references to the mix.
  memcpy(unboxed_closure, unaligned_interior_closure, boxed_closure_as_array->GetLength());

  DCHECK_EQ(unboxed_closure->GetSize(), static_cast<size_t>(boxed_closure_as_array->GetLength()));

  *out_closure = unboxed_closure;
  return true;
}

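// Finds the existing box for this closure, if any; returns a null ValueType when the closure
// has not been boxed yet. Both callers in this file hold lambda_table_lock_ around this call.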
BoxTable::ValueType BoxTable::FindBoxedLambda(const ClosureType& closure) const {
  auto map_iterator = map_.Find(closure);
  if (map_iterator != map_.end()) {
    const std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;
    const ValueType& value = key_value_pair.second;

    DCHECK(!value.IsNull());  // Never store null boxes.
    return value;
  }

  return ValueType(nullptr);
}

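// Waits until weak box table entries may be accessed again: either allow_new_weaks_ is set
// (collectors without read barriers) or this thread's weak-ref access is enabled (collectors
// with read barriers). Callers hold lambda_table_lock_, the lock guarding new_weaks_condition_.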
void BoxTable::BlockUntilWeaksAllowed() {
  Thread* self = Thread::Current();
  while (UNLIKELY((!kUseReadBarrier && !allow_new_weaks_) ||
                  (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
    new_weaks_condition_.WaitHoldingLocks(self);  // wait while holding mutator lock
  }
}

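// Called by the GC to sweep the weak roots held in the box table: entries whose boxed object
// was not marked are erased (and the table's closure copy freed), while entries whose boxed
// object moved are updated to the new address.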
void BoxTable::SweepWeakBoxedLambdas(IsMarkedVisitor* visitor) {
  DCHECK(visitor != nullptr);

  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::lambda_table_lock_);

  /*
   * Visit every weak root in our lambda box table.
   * Remove unmarked objects and update marked objects to their new addresses.
   */
  std::vector<ClosureType> remove_list;
  for (auto map_iterator = map_.begin(); map_iterator != map_.end(); ) {
    std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;

    const ValueType& old_value = key_value_pair.second;

    // This does not need a read barrier because this is called by GC.
    mirror::Object* old_value_raw = old_value.Read<kWithoutReadBarrier>();
    mirror::Object* new_value = visitor->IsMarked(old_value_raw);

    if (new_value == nullptr) {
      // The object has been swept away.
      const ClosureType& closure = key_value_pair.first;

      // Delete the entry from the map.
      map_iterator = map_.Erase(map_iterator);

      // Clean up the memory by deleting the closure.
      ClosureAllocator::Delete(closure);
    } else {
      // The object has been moved.
      // Update the map.
      key_value_pair.second = ValueType(new_value);
      ++map_iterator;
    }
  }

  // Occasionally shrink the map to avoid growing very large.
  if (map_.CalculateLoadFactor() < kMinimumLoadFactor) {
    map_.ShrinkToMaximumLoad();
  }
}

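// Weak-root access control for the box table: DisallowNewWeakBoxedLambdas and
// AllowNewWeakBoxedLambdas are used only when read barriers are disabled, while
// BroadcastForNewWeakBoxedLambdas is used only when they are enabled (enforced by the CHECKs).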
void BoxTable::DisallowNewWeakBoxedLambdas() {
  CHECK(!kUseReadBarrier);
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::lambda_table_lock_);

  allow_new_weaks_ = false;
}

void BoxTable::AllowNewWeakBoxedLambdas() {
  CHECK(!kUseReadBarrier);
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::lambda_table_lock_);

  allow_new_weaks_ = true;
  new_weaks_condition_.Broadcast(self);
}

void BoxTable::BroadcastForNewWeakBoxedLambdas() {
  CHECK(kUseReadBarrier);
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::lambda_table_lock_);
  new_weaks_condition_.Broadcast(self);
}

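// Hash table plumbing for map_: an empty slot is marked by a null closure key (with its
// GC root cleared); key equality and hashing are delegated to the Closure itself.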
void BoxTable::EmptyFn::MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& item) const {
  item.first = nullptr;

  Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  item.second = ValueType();  // Also clear the GC root.
}

bool BoxTable::EmptyFn::IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const {
  return item.first == nullptr;
}

bool BoxTable::EqualsFn::operator()(const UnorderedMapKeyType& lhs,
                                    const UnorderedMapKeyType& rhs) const {
  // Nothing here needs the mutator lock right now, but keep this assertion for later,
  // when we need to look at the references inside the closure.
  Locks::mutator_lock_->AssertSharedHeld(Thread::Current());

  return lhs->ReferenceEquals(rhs);
}

size_t BoxTable::HashFn::operator()(const UnorderedMapKeyType& key) const {
  const lambda::Closure* closure = key;
  DCHECK_ALIGNED(closure, alignof(lambda::Closure));

  // Need to hold mutator_lock_ before calling into Closure::GetHashCode.
  Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  return closure->GetHashCode();
}

}  // namespace lambda
}  // namespace art