/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "lambda/box_table.h"

#include "base/mutex.h"
#include "common_throws.h"
#include "gc_root-inl.h"
#include "lambda/box_class_table.h"
#include "lambda/closure.h"
#include "lambda/leaking_allocator.h"
#include "mirror/lambda_proxy.h"
#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "thread.h"

#include <vector>

namespace art {
namespace lambda {
// All closures are boxed into a subtype of LambdaProxy which implements the lambda's interface.
using BoxedClosurePointerType = mirror::LambdaProxy*;
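
// For illustration only (the generated name and shape below are a sketch, not the real
// mangling scheme): boxing a closure whose interface is "interface Functional { int run(); }"
// yields an instance of a runtime-generated class roughly like
//   class <box-of-Functional> extends java.lang.LambdaProxy implements Functional { ... }
// with the native Closure pointer attached to it via LambdaProxy::SetClosure().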

// Returns the base class for all boxed closures.
// Note that concrete closure boxes are actually a subtype of mirror::LambdaProxy.
static mirror::Class* GetBoxedClosureBaseClass() SHARED_REQUIRES(Locks::mutator_lock_) {
  return Runtime::Current()->GetClassLinker()->GetClassRoot(ClassLinker::kJavaLangLambdaProxy);
}

namespace {
// Convenience functions for allocating/deleting box table copies of the closures.
struct ClosureAllocator {
  // Deletes a Closure that was allocated through ::Allocate.
  static void Delete(Closure* ptr) {
    delete[] reinterpret_cast<char*>(ptr);
  }

  // Returns a well-aligned pointer to a newly allocated Closure on the 'new' heap.
  static Closure* Allocate(size_t size) {
    DCHECK_GE(size, sizeof(Closure));

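    // Closures are variable-size ("flexible") objects, so the allocation is done as a raw
    // byte array of the requested size rather than with 'new Closure'.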
    // TODO: Maybe point to the interior of the boxed closure object after we add proxy support?
    Closure* closure = reinterpret_cast<Closure*>(new char[size]);
    DCHECK_ALIGNED(closure, alignof(Closure));
    return closure;
  }
};

struct DeleterForClosure {
  void operator()(Closure* closure) const {
    ClosureAllocator::Delete(closure);
  }
};

using UniqueClosurePtr = std::unique_ptr<Closure, DeleterForClosure>;
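// Used below so that the box table's copy of a closure is freed automatically if BoxLambda
// bails out (e.g. another thread won the race to box the same closure) before ownership is
// handed over to the map via release().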
}  // namespace

BoxTable::BoxTable()
    : allow_new_weaks_(true),
      new_weaks_condition_("lambda box table allowed weaks", *Locks::lambda_table_lock_) {}

BoxTable::~BoxTable() {
  // Free all the copies of our closures.
  for (auto map_iterator = map_.begin(); map_iterator != map_.end(); ) {
    std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;

    Closure* closure = key_value_pair.first;

    // Remove the entry from the map first, so that nothing accesses a dangling pointer.
    map_iterator = map_.Erase(map_iterator);

    // Safe to delete, no dangling pointers.
    ClosureAllocator::Delete(closure);
  }
}

mirror::Object* BoxTable::BoxLambda(const ClosureType& closure,
                                    const char* class_name,
                                    mirror::ClassLoader* class_loader) {
  Thread* self = Thread::Current();

  {
    // TODO: Switch to ReaderMutexLock if ConditionVariable ever supports RW Mutexes
    /*Reader*/MutexLock mu(self, *Locks::lambda_table_lock_);
    BlockUntilWeaksAllowed();

    // Attempt to look up this object, it's possible it was already boxed previously.
    // If this is the case we *must* return the same object as before to maintain
    // referential equality.
    //
    // In managed code:
    //   Functional f = () -> 5;  // vF = create-lambda
    //   Object a = f;            // vA = box-lambda vF
    //   Object b = f;            // vB = box-lambda vF
    //   assert(a == b)
    ValueType value = FindBoxedLambda(closure);
    if (!value.IsNull()) {
      return value.Read();
    }

    // Otherwise we need to box the closure ourselves and insert it into the hash map.
  }
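
  // From here on the allocation happens without holding lambda_table_lock_ (it may suspend
  // the thread), so another thread can box the same closure concurrently; the second lookup
  // under the lock further below resolves that race in favor of whichever insert came first.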

  // Convert the Closure into a managed object instance, whose supertype is java.lang.LambdaProxy.

  // TODO: Boxing a learned lambda (i.e. made with unbox-lambda) should return the original object
  StackHandleScope<2> hs{self};  // NOLINT: [readability/braces] [4]

  Handle<mirror::ClassLoader> class_loader_handle = hs.NewHandle(class_loader);

  // The lambda table lock was released above, so thread suspension is allowed again here.
  self->AllowThreadSuspension();

  lambda::BoxClassTable* lambda_box_class_table;

  // Find the lambda box class table, which lives in the system class loader when class_loader
  // is null.
  if (class_loader == nullptr) {
    ScopedObjectAccessUnchecked soa(self);
    mirror::ClassLoader* system_class_loader =
        soa.Decode<mirror::ClassLoader*>(Runtime::Current()->GetSystemClassLoader());
    lambda_box_class_table = system_class_loader->GetLambdaProxyCache();
  } else {
    lambda_box_class_table = class_loader_handle->GetLambdaProxyCache();
    // OK: can't be deleted while we hold a handle to the class loader.
  }
  DCHECK(lambda_box_class_table != nullptr);

  Handle<mirror::Class> closure_class(hs.NewHandle(
      lambda_box_class_table->GetOrCreateBoxClass(class_name, class_loader_handle)));
  if (UNLIKELY(closure_class.Get() == nullptr)) {
    // Most likely an OOM has occurred.
    self->AssertPendingException();
    return nullptr;
  }

  BoxedClosurePointerType closure_as_object = nullptr;
  UniqueClosurePtr closure_table_copy;
  // Create an instance of the class, and assign the pointer to the closure into it.
  {
    closure_as_object = down_cast<BoxedClosurePointerType>(closure_class->AllocObject(self));
    if (UNLIKELY(closure_as_object == nullptr)) {
      self->AssertPendingOOMException();
      return nullptr;
    }

    // Make a copy of the closure that we will store in the hash map.
    // The proxy instance will also point to this same copy.
    // Note that the closure pointer is cleaned up only after the proxy is GCd.
    closure_table_copy.reset(ClosureAllocator::Allocate(closure->GetSize()));
    closure_as_object->SetClosure(closure_table_copy.get());
  }

  // There are no thread suspension points after this, so we don't need to put it into a handle.
  ScopedAssertNoThreadSuspension soants{self,  // NOLINT: [whitespace/braces] [5]
                                        "box lambda table - box lambda - no more suspensions"};  // NOLINT: [whitespace/braces] [5]

  // Write the raw closure data into the proxy instance's copy of the closure.
  closure->CopyTo(closure_table_copy.get(), closure->GetSize());

  // The closure has been successfully boxed into an object, now insert it into the hash map.
  {
    MutexLock mu(self, *Locks::lambda_table_lock_);
    BlockUntilWeaksAllowed();

    // Look up the object again, it's possible another thread already boxed it while
    // we were allocating the object before.
    ValueType value = FindBoxedLambda(closure);
    if (UNLIKELY(!value.IsNull())) {
      // Let the GC clean up closure_as_object at a later time.
      // (We will not see this object when sweeping, it wasn't inserted yet.)
      closure_as_object->SetClosure(nullptr);
      return value.Read();
    }

    // Otherwise we need to insert it into the hash map in this thread.

    // The closure_table_copy is deleted by us manually when we erase it from the map.

    // Actually insert into the table.
    map_.Insert({closure_table_copy.release(), ValueType(closure_as_object)});
  }

  return closure_as_object;
}

bool BoxTable::UnboxLambda(mirror::Object* object, ClosureType* out_closure) {
  DCHECK(object != nullptr);
  *out_closure = nullptr;

  Thread* self = Thread::Current();

  // Note that we do not need to access lambda_table_lock_ here
  // since we don't need to look at the map.
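
  // In managed code, unboxing roughly corresponds to (a sketch; the exact unbox-lambda
  // operand syntax shown is illustrative, not authoritative):
  //   Functional f = () -> 5;          // vF = create-lambda
  //   Object a = f;                    // vA = box-lambda vF
  //   Functional f2 = (Functional) a;  // vF2 = unbox-lambda vA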

  mirror::Object* boxed_closure_object = object;

  // Raise ClassCastException if object is not instanceof LambdaProxy.
  if (UNLIKELY(!boxed_closure_object->InstanceOf(GetBoxedClosureBaseClass()))) {
    ThrowClassCastException(GetBoxedClosureBaseClass(), boxed_closure_object->GetClass());
    return false;
  }

  // TODO(iam): We must check that the closure object extends/implements the type
  // specified in [type id]. This is not currently implemented since the type id is unavailable.

  // If we got this far, the inputs are valid.
  // Shuffle the java.lang.LambdaProxy back into a raw closure, then allocate a copy of it
  // and return that copy.
  BoxedClosurePointerType boxed_closure =
      down_cast<BoxedClosurePointerType>(boxed_closure_object);

  const Closure* aligned_interior_closure = boxed_closure->GetClosure();
  DCHECK(aligned_interior_closure != nullptr);
  DCHECK_ALIGNED(aligned_interior_closure, alignof(Closure));

  // TODO: we probably don't need to make a copy here later on, once there's GC support.

  // Allocate a copy that can "escape" and copy the closure data into that.
  Closure* unboxed_closure =
      LeakingAllocator::MakeFlexibleInstance<Closure>(self, aligned_interior_closure->GetSize());
  DCHECK_ALIGNED(unboxed_closure, alignof(Closure));
  // TODO: don't just memcpy the closure, it's unsafe when we add references to the mix.
  memcpy(unboxed_closure, aligned_interior_closure, aligned_interior_closure->GetSize());

  DCHECK_EQ(unboxed_closure->GetSize(), aligned_interior_closure->GetSize());

  *out_closure = unboxed_closure;
  return true;
}

BoxTable::ValueType BoxTable::FindBoxedLambda(const ClosureType& closure) const {
  auto map_iterator = map_.Find(closure);
  if (map_iterator != map_.end()) {
    const std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;
    const ValueType& value = key_value_pair.second;

    DCHECK(!value.IsNull());  // Never store null boxes.
    return value;
  }

  return ValueType(nullptr);
}

void BoxTable::BlockUntilWeaksAllowed() {
  Thread* self = Thread::Current();
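  // Two different mechanisms can block boxing here: without read barriers the GC toggles
  // allow_new_weaks_ via DisallowNewWeakBoxedLambdas()/AllowNewWeakBoxedLambdas(); with read
  // barriers (concurrent copying) weak reference access is gated per-thread instead, and
  // waiters are woken up via BroadcastForNewWeakBoxedLambdas().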
  while (UNLIKELY((!kUseReadBarrier && !allow_new_weaks_) ||
                  (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
    new_weaks_condition_.WaitHoldingLocks(self);  // wait while holding mutator lock
  }
}

void BoxTable::SweepWeakBoxedLambdas(IsMarkedVisitor* visitor) {
  DCHECK(visitor != nullptr);

  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::lambda_table_lock_);

  /*
   * Visit every weak root in our lambda box table.
   * Remove unmarked objects, update marked objects to new address.
   */
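  // The table holds its boxes only weakly: an entry on its own must not keep the proxy
  // object alive, otherwise a boxed lambda could never be collected.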
  for (auto map_iterator = map_.begin(); map_iterator != map_.end(); ) {
    std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;

    const ValueType& old_value = key_value_pair.second;

    // This does not need a read barrier because this is called by GC.
    mirror::Object* old_value_raw = old_value.Read<kWithoutReadBarrier>();
    mirror::Object* new_value = visitor->IsMarked(old_value_raw);

    if (new_value == nullptr) {
      // The object has been swept away.
      Closure* closure = key_value_pair.first;

      // Delete the entry from the map.
      // (Remove from the map first to avoid accessing a dangling pointer.)
      map_iterator = map_.Erase(map_iterator);

      // Clean up the memory by deleting the closure.
      ClosureAllocator::Delete(closure);
    } else {
      // The object has been moved.
      // Update the map.
      key_value_pair.second = ValueType(new_value);
      ++map_iterator;
    }
  }

  // Occasionally shrink the map to avoid growing very large.
  if (map_.CalculateLoadFactor() < kMinimumLoadFactor) {
    map_.ShrinkToMaximumLoad();
  }
}

void BoxTable::DisallowNewWeakBoxedLambdas() {
  CHECK(!kUseReadBarrier);
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::lambda_table_lock_);

  allow_new_weaks_ = false;
}

void BoxTable::AllowNewWeakBoxedLambdas() {
  CHECK(!kUseReadBarrier);
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::lambda_table_lock_);

  allow_new_weaks_ = true;
  new_weaks_condition_.Broadcast(self);
}

void BoxTable::BroadcastForNewWeakBoxedLambdas() {
  CHECK(kUseReadBarrier);
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::lambda_table_lock_);
  new_weaks_condition_.Broadcast(self);
}

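// EmptyFn, EqualsFn and HashFn are the policy functors for the closure-keyed hash map (map_):
// they define what an empty slot looks like, when two closure keys compare equal, and how a
// closure key is hashed.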
void BoxTable::EmptyFn::MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& item) const {
  item.first = nullptr;

  Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  item.second = ValueType();  // Also clear the GC root.
}

bool BoxTable::EmptyFn::IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const {
  bool is_empty = item.first == nullptr;
  DCHECK_EQ(item.second.IsNull(), is_empty);

  return is_empty;
}

bool BoxTable::EqualsFn::operator()(const UnorderedMapKeyType& lhs,
                                    const UnorderedMapKeyType& rhs) const {
  // Nothing needs this right now, but leave this assertion for later when
  // we need to look at the references inside of the closure.
  Locks::mutator_lock_->AssertSharedHeld(Thread::Current());

  return lhs->ReferenceEquals(rhs);
}

size_t BoxTable::HashFn::operator()(const UnorderedMapKeyType& key) const {
  const lambda::Closure* closure = key;
  DCHECK_ALIGNED(closure, alignof(lambda::Closure));

  // Need to hold mutator_lock_ before calling into Closure::GetHashCode.
  Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  return closure->GetHashCode();
}

}  // namespace lambda
}  // namespace art