/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef ART_RUNTIME_LAMBDA_BOX_TABLE_H_
#define ART_RUNTIME_LAMBDA_BOX_TABLE_H_

#include "base/allocator.h"
#include "base/hash_map.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "gc_root.h"
#include "object_callbacks.h"

#include <stdint.h>

namespace art {

class ArtMethod;  // forward declaration

namespace mirror {
class Object;  // forward declaration
}  // namespace mirror

namespace lambda {
struct Closure;  // forward declaration

/*
 * Store a table of boxed lambdas. This is required to maintain object referential equality
 * when a lambda is re-boxed.
 *
 * Conceptually, we store a mapping of Closures -> Weak Reference<Boxed Lambda Object>.
 * When too many objects get GCd, we shrink the underlying table to use less space.
 */
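// Illustrative usage sketch (not part of this header). It assumes the table is reached through
// the usual Runtime accessor (Runtime::Current()->GetLambdaBoxTable()), that the caller holds
// the mutator lock, and that `closure` is a valid Closure*; the variable names are hypothetical:
//
//   Closure* closure = /* created by the interpreter */;
//   BoxTable* table = Runtime::Current()->GetLambdaBoxTable();
//   mirror::Object* boxed = table->BoxLambda(closure);
//   mirror::Object* boxed_again = table->BoxLambda(closure);  // same box: boxed == boxed_again
//
//   Closure* unboxed;
//   if (table->UnboxLambda(boxed, &unboxed)) {
//     // `unboxed` refers to the original closure.
//   }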
class BoxTable FINAL {
 public:
  using ClosureType = art::lambda::Closure*;

  // Boxes a closure into an object. Returns null and throws an exception on failure.
  mirror::Object* BoxLambda(const ClosureType& closure)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::lambda_table_lock_);

  // Unboxes an object back into the lambda. Returns false and throws an exception on failure.
  bool UnboxLambda(mirror::Object* object, ClosureType* out_closure)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Sweep weak references to lambda boxes. Update the addresses if the objects have been
  // moved, and delete them from the table if the objects have been cleaned up.
  void SweepWeakBoxedLambdas(IsMarkedVisitor* visitor)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::lambda_table_lock_);

  // GC callback: Temporarily block anyone from touching the map.
  void DisallowNewWeakBoxedLambdas()
      REQUIRES(!Locks::lambda_table_lock_);

  // GC callback: Unblock any readers who have been queued waiting to touch the map.
  void AllowNewWeakBoxedLambdas()
      REQUIRES(!Locks::lambda_table_lock_);

  // GC callback: Wake up any readers waiting on the condition variable so they can re-check
  // whether touching the map is allowed (unlike AllowNewWeakBoxedLambdas, this does not change
  // the allow/disallow state itself).
  void BroadcastForNewWeakBoxedLambdas()
      REQUIRES(!Locks::lambda_table_lock_);
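  // Illustrative (hypothetical) ordering of the callbacks above around a sweep; the actual call
  // sites live in the GC/runtime code, not in this class:
  //
  //   table->DisallowNewWeakBoxedLambdas();    // mutators now block in BlockUntilWeaksAllowed()
  //   /* ... GC marks reachable objects ... */
  //   table->SweepWeakBoxedLambdas(visitor);   // fix up moved boxes, drop unmarked ones
  //   table->AllowNewWeakBoxedLambdas();       // wake any blocked mutators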

  BoxTable();
  ~BoxTable();

 private:
  // Explanation:
  // - After all threads are suspended (exclusive mutator lock),
  //   the concurrent-copying GC can move objects from the "from" space to the "to" space.
  //   If an object is moved at that time and *before* SweepSystemWeaks is called, then
  //   we don't know whether the move has happened yet.
  //   Successive reads would then (incorrectly) look at the objects in the "from" space,
  //   which is a problem since the objects have already been forwarded and mutations
  //   would not be visible in the right space.
  //   Instead, use a GcRoot here, which will be automatically updated by the GC.
  //
  // Also, any reads should be protected by a read barrier to always give us the
  // "to" space address.
  using ValueType = GcRoot<mirror::Object>;
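  // Sketch of how a stored value is meant to be read back (illustrative; the real lookups live
  // in box_table.cc): GcRoot<>::Read() goes through the read barrier, so callers always observe
  // the "to" space copy even while the concurrent-copying GC is running.
  //
  //   ValueType value = FindBoxedLambda(closure);  // helper declared below
  //   if (!value.IsNull()) {
  //     mirror::Object* boxed = value.Read();      // read barrier applied here
  //   }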

  // Attempt to look up the lambda in the map, or return null if it's not there yet.
  ValueType FindBoxedLambda(const ClosureType& closure) const
      SHARED_REQUIRES(Locks::lambda_table_lock_);

  // If the GC has come in and temporarily disallowed touching weaks, block until it is allowed.
  void BlockUntilWeaksAllowed()
      SHARED_REQUIRES(Locks::lambda_table_lock_);

  // Note: ideally the Closure key would be wrapped in a unique_ptr so that the HashMap could
  // delete its memory automatically; for now the key is stored as the raw Closure pointer.
  using UnorderedMapKeyType = ClosureType;

  // EmptyFn implementation for art::HashMap
  struct EmptyFn {
    void MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& item) const
        NO_THREAD_SAFETY_ANALYSIS;  // SHARED_REQUIRES(Locks::mutator_lock_)

    bool IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const;
  };

  // HashFn implementation for art::HashMap
  struct HashFn {
    size_t operator()(const UnorderedMapKeyType& key) const
        NO_THREAD_SAFETY_ANALYSIS;  // SHARED_REQUIRES(Locks::mutator_lock_)
  };

  // EqualsFn implementation for art::HashMap
  struct EqualsFn {
    bool operator()(const UnorderedMapKeyType& lhs, const UnorderedMapKeyType& rhs) const
        NO_THREAD_SAFETY_ANALYSIS;  // SHARED_REQUIRES(Locks::mutator_lock_)
  };
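  // For reference, a minimal (hypothetical) functor set for an art::HashMap keyed by a raw
  // pointer looks like the sketch below; art::HashMap stores elements inline, so EmptyFn must be
  // able to mark a slot as empty and recognize such a slot. The real BoxTable functors are
  // defined out of line (presumably in box_table.cc); the names below are illustrative only.
  //
  //   struct PtrEmptyFn {
  //     void MakeEmpty(std::pair<void*, int>& item) const { item.first = nullptr; }
  //     bool IsEmpty(const std::pair<void*, int>& item) const { return item.first == nullptr; }
  //   };
  //   struct PtrHashFn {
  //     size_t operator()(void* key) const { return std::hash<void*>()(key); }
  //   };
  //   struct PtrEqualsFn {
  //     bool operator()(void* lhs, void* rhs) const { return lhs == rhs; }
  //   };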

  using UnorderedMap = art::HashMap<UnorderedMapKeyType,
                                    ValueType,
                                    EmptyFn,
                                    HashFn,
                                    EqualsFn,
                                    TrackingAllocator<std::pair<ClosureType, ValueType>,
                                                      kAllocatorTagLambdaBoxTable>>;

  UnorderedMap map_                       GUARDED_BY(Locks::lambda_table_lock_);
  bool allow_new_weaks_                   GUARDED_BY(Locks::lambda_table_lock_);
  ConditionVariable new_weaks_condition_  GUARDED_BY(Locks::lambda_table_lock_);

  // Shrink the map when we get below this load factor.
  // (This is an arbitrary value that should be large enough to prevent aggressive map erases
  // from shrinking the table too often.)
  static constexpr double kMinimumLoadFactor = UnorderedMap::kDefaultMinLoadFactor / 2;
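  // (For example, if UnorderedMap::kDefaultMinLoadFactor were 0.4 -- a value used here purely
  //  for illustration -- kMinimumLoadFactor would be 0.2, so a table with 100 buckets would only
  //  be shrunk once fewer than 20 entries remain.)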

  DISALLOW_COPY_AND_ASSIGN(BoxTable);
};

}  // namespace lambda
}  // namespace art

#endif  // ART_RUNTIME_LAMBDA_BOX_TABLE_H_