/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef ART_RUNTIME_LAMBDA_BOX_TABLE_H_
#define ART_RUNTIME_LAMBDA_BOX_TABLE_H_

#include <stdint.h>

#include <string>

#include "base/allocator.h"
#include "base/hash_map.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "gc_root.h"
#include "object_callbacks.h"

namespace art {

class ArtMethod;  // forward declaration

namespace mirror {
class Class;        // forward declaration
class ClassLoader;  // forward declaration
class LambdaProxy;  // forward declaration
class Object;       // forward declaration
}  // namespace mirror

namespace lambda {
struct Closure;  // forward declaration

/*
 * Store a table of boxed lambdas. This is required to maintain object referential equality
 * when a lambda is re-boxed.
 *
 * Conceptually, we store a mapping of Closure -> Weak Reference<Boxed Lambda Object>.
 * When too many of the boxed objects get GC'd, we shrink the underlying table to use less space.
 */
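// For example, boxing the same closure twice must return the same object (a
// sketch; "table", "closure", "name", and "loader" stand in for a live
// BoxTable, an existing Closure, and the proxy class parameters):
//
//   mirror::Object* box1 = table->BoxLambda(closure, name, loader);
//   mirror::Object* box2 = table->BoxLambda(closure, name, loader);
//   DCHECK_EQ(box1, box2);  // Re-boxing preserves referential equality.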
class BoxTable FINAL {
 public:
  using ClosureType = art::lambda::Closure*;

  // Boxes a closure into a lambda proxy object of the class named |class_name|, resolved
  // through |class_loader|. Returns null and throws an exception on failure.
  mirror::Object* BoxLambda(const ClosureType& closure,
                            const char* class_name,
                            mirror::ClassLoader* class_loader)
      REQUIRES(!Locks::lambda_table_lock_, !Roles::uninterruptible_)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Unboxes an object back into the lambda. Returns false and throws an exception on failure.
  bool UnboxLambda(mirror::Object* object, ClosureType* out_closure)
      SHARED_REQUIRES(Locks::mutator_lock_);
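
  // A box/unbox round-trip sketch (illustrative only; assumes "table",
  // "closure", "name", and "loader" are valid at the call site):
  //
  //   mirror::Object* boxed = table->BoxLambda(closure, name, loader);
  //   if (boxed == nullptr) {
  //     return;  // An exception is already pending.
  //   }
  //   ClosureType unboxed;
  //   if (table->UnboxLambda(boxed, &unboxed)) {
  //     // On success, |unboxed| points at the closure carried by |boxed|.
  //   }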

  // Sweep weak references to lambda boxes. Update the addresses if the objects have been
  // moved, and delete them from the table if the objects have been cleaned up.
  void SweepWeakBoxedLambdas(IsMarkedVisitor* visitor)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::lambda_table_lock_);

  // GC callback: Temporarily block anyone from touching the map.
  void DisallowNewWeakBoxedLambdas()
      REQUIRES(!Locks::lambda_table_lock_);

  // GC callback: Unblock any readers who have been queued waiting to touch the map.
  void AllowNewWeakBoxedLambdas()
      REQUIRES(!Locks::lambda_table_lock_);

  // GC callback: Broadcast to any readers queued waiting to touch the map that they
  // should re-check whether weak access is allowed.
  void BroadcastForNewWeakBoxedLambdas()
      REQUIRES(!Locks::lambda_table_lock_);
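
  // One plausible ordering in which a GC pause drives these callbacks (a
  // sketch; the real call sites live in the runtime's system-weak handling,
  // not in this header):
  //
  //   table->DisallowNewWeakBoxedLambdas();      // Block new readers.
  //   table->SweepWeakBoxedLambdas(visitor);     // Update or evict entries.
  //   table->AllowNewWeakBoxedLambdas();         // Re-enable access...
  //   table->BroadcastForNewWeakBoxedLambdas();  // ...and wake any waiters.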

  BoxTable();
  ~BoxTable();

 private:
  // Explanation:
  // - After all threads are suspended (exclusive mutator lock),
  //   the concurrent-copying GC can move objects from the "from" space to the "to" space.
  //   If an object is moved at that time and *before* SweepSystemWeaks is called,
  //   then we don't know whether the move has happened yet.
  //   Successive reads would then (incorrectly) look at the objects in the "from" space,
  //   which is a problem since the objects have already been forwarded and mutations
  //   would not be visible in the right space.
  //   Instead, use a GcRoot here, which will be automatically updated by the GC.
  //
  // Also, any reads should be protected by a read barrier to always give us the
  // "to" space address.
  using ValueType = GcRoot<mirror::Object>;
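
  // Reads go through the GcRoot so that the read barrier hands back the
  // "to" space address, e.g. (sketch):
  //
  //   ValueType root = FindBoxedLambda(closure);
  //   if (!root.IsNull()) {
  //     mirror::Object* boxed = root.Read();  // Read barrier applied here.
  //   }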

  // Attempt to look up the lambda in the map, or return null if it's not there yet.
  ValueType FindBoxedLambda(const ClosureType& closure) const
      SHARED_REQUIRES(Locks::lambda_table_lock_);

  // If the GC has come in and temporarily disallowed touching weaks, block until it is allowed.
  void BlockUntilWeaksAllowed()
      SHARED_REQUIRES(Locks::lambda_table_lock_);
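
  // A plausible shape for this wait loop (a sketch; the real definition
  // lives in box_table.cc):
  //
  //   Thread* self = Thread::Current();
  //   while (UNLIKELY(!allow_new_weaks_)) {
  //     new_weaks_condition_.WaitHoldingLocks(self);
  //   }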

  // The closure key is stored as a raw pointer rather than a unique_ptr; the map's
  // EmptyFn hook (below) is what allows the table to release the closure's memory
  // when an entry is removed.
  using UnorderedMapKeyType = ClosureType;

  // EmptyFn implementation for art::HashMap
  struct EmptyFn {
    void MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& item) const
        NO_THREAD_SAFETY_ANALYSIS;  // SHARED_REQUIRES(Locks::mutator_lock_)

    bool IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const;

    // Overloads for the class-name-keyed ClassMap below.
    void MakeEmpty(std::pair<std::string, GcRoot<mirror::Class>>& item) const;
    bool IsEmpty(const std::pair<std::string, GcRoot<mirror::Class>>& item) const;
  };

  // HashFn implementation for art::HashMap
  struct HashFn {
    size_t operator()(const UnorderedMapKeyType& key) const
        NO_THREAD_SAFETY_ANALYSIS;  // SHARED_REQUIRES(Locks::mutator_lock_)

    // Overload for the class-name-keyed ClassMap below.
    size_t operator()(const std::string& key) const;
  };

  // EqualsFn implementation for art::HashMap
  struct EqualsFn {
    bool operator()(const UnorderedMapKeyType& lhs, const UnorderedMapKeyType& rhs) const
        NO_THREAD_SAFETY_ANALYSIS;  // SHARED_REQUIRES(Locks::mutator_lock_)

    // Overload for the class-name-keyed ClassMap below.
    bool operator()(const std::string& lhs, const std::string& rhs) const;
  };
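
  // Plausible shapes for the closure-keyed functors (a sketch; the real
  // definitions live in box_table.cc):
  //
  //   void EmptyFn::MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& item) const {
  //     item.first = nullptr;       // A null closure key marks an empty slot.
  //     item.second = ValueType();  // Also clear the GC root.
  //   }
  //
  //   bool EmptyFn::IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const {
  //     return item.first == nullptr;
  //   }
  //
  // HashFn and EqualsFn are expected to hash and compare the closures'
  // captured state (not the raw pointers), so that an equivalent re-created
  // closure still finds its existing box.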

  using UnorderedMap = art::HashMap<UnorderedMapKeyType,
                                    ValueType,
                                    EmptyFn,
                                    HashFn,
                                    EqualsFn,
                                    TrackingAllocator<std::pair<ClosureType, ValueType>,
                                                      kAllocatorTagLambdaBoxTable>>;

  // Maps a lambda proxy class name to the (weakly referenced) proxy class.
  using ClassMap = art::HashMap<std::string,
                                GcRoot<mirror::Class>,
                                EmptyFn,
                                HashFn,
                                EqualsFn,
                                TrackingAllocator<std::pair<std::string, GcRoot<mirror::Class>>,
                                                  kAllocatorTagLambdaProxyClassBoxTable>>;

  UnorderedMap map_ GUARDED_BY(Locks::lambda_table_lock_);
  ClassMap classes_map_ GUARDED_BY(Locks::lambda_table_lock_);
  bool allow_new_weaks_ GUARDED_BY(Locks::lambda_table_lock_);
  ConditionVariable new_weaks_condition_ GUARDED_BY(Locks::lambda_table_lock_);

  // Shrink the map when we get below this load factor.
  // (This is an arbitrary value that should be large enough to prevent aggressive map erases
  // from shrinking the table too often.)
  static constexpr double kMinimumLoadFactor = UnorderedMap::kDefaultMinLoadFactor / 2;
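
  // Sweeping is expected to shrink the table once it dips below this load
  // factor, along the lines of (a sketch; the exact art::HashMap helper
  // names here are assumptions):
  //
  //   if (map_.CalculateLoadFactor() < kMinimumLoadFactor) {
  //     map_.ShrinkToMaximumLoad();
  //   }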

  DISALLOW_COPY_AND_ASSIGN(BoxTable);
};

}  // namespace lambda
}  // namespace art

#endif  // ART_RUNTIME_LAMBDA_BOX_TABLE_H_