/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef ART_RUNTIME_LAMBDA_BOX_TABLE_H_
#define ART_RUNTIME_LAMBDA_BOX_TABLE_H_

#include "base/allocator.h"
#include "base/hash_map.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "gc_root.h"
#include "object_callbacks.h"

#include <stdint.h>

namespace art {

class ArtMethod;  // forward declaration

namespace mirror {
class Object;  // forward declaration
}  // namespace mirror

namespace lambda {
struct Closure;  // forward declaration

/*
 * Store a table of boxed lambdas. This is required to maintain object referential equality
 * when a lambda is re-boxed.
 *
 * Conceptually, we store a mapping of Closures -> Weak Reference<Boxed Lambda Object>.
 * When too many objects get GCd, we shrink the underlying table to use less space.
 */
class BoxTable FINAL {
 public:
  using ClosureType = art::lambda::Closure*;

  // Boxes a closure into an object. Returns null and throws an exception on failure.
  mirror::Object* BoxLambda(const ClosureType& closure)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::lambda_table_lock_);

  // Unboxes an object back into the lambda. Returns false and throws an exception on failure.
  bool UnboxLambda(mirror::Object* object, ClosureType* out_closure)
      SHARED_REQUIRES(Locks::mutator_lock_);
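
  // Usage sketch (illustrative only; `box_table` and `closure` are hypothetical names, the
  // mutator lock is assumed to be held, and error handling is omitted):
  //
  //   lambda::Closure* closure = ...;                        // closure being (re-)boxed
  //   mirror::Object* box1 = box_table->BoxLambda(closure);
  //   mirror::Object* box2 = box_table->BoxLambda(closure);
  //   // Same box is returned for the same closure: box1 == box2 (referential equality).
  //
  //   lambda::Closure* unboxed = nullptr;
  //   if (box_table->UnboxLambda(box1, &unboxed)) {
  //     // On success, `unboxed` refers to the closure backing `box1`.
  //   }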

  // Sweep weak references to lambda boxes. Update the addresses if the objects have been
  // moved, and delete them from the table if the objects have been cleaned up.
  void SweepWeakBoxedLambdas(IsMarkedVisitor* visitor)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::lambda_table_lock_);

  // GC callback: Temporarily block anyone from touching the map.
  void DisallowNewWeakBoxedLambdas()
      REQUIRES(!Locks::lambda_table_lock_);

  // GC callback: Unblock any readers who have been queued waiting to touch the map.
  void AllowNewWeakBoxedLambdas()
      REQUIRES(!Locks::lambda_table_lock_);

  // GC callback: Wake up any readers who have been queued waiting to touch the map
  // (broadcasts on the condition variable without changing whether new weaks are allowed).
  void BroadcastForNewWeakBoxedLambdas()
      REQUIRES(!Locks::lambda_table_lock_);
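
  // Sketch of how the callbacks above are intended to bracket a sweep (a rough outline inferred
  // from the comments in this header; the actual call sites live in the runtime/GC code):
  //
  //   box_table->DisallowNewWeakBoxedLambdas();   // readers now wait in BlockUntilWeaksAllowed()
  //   box_table->SweepWeakBoxedLambdas(visitor);  // update moved boxes, drop collected ones
  //   box_table->AllowNewWeakBoxedLambdas();      // lift the block for queued readers
  //
  // BroadcastForNewWeakBoxedLambdas() only wakes queued readers, for collectors that re-enable
  // weak access without toggling the disallow flag (an assumption based on the analogous
  // system-weak tables).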

  BoxTable();
  ~BoxTable();

 private:
  // Explanation:
  // - After all threads are suspended (exclusive mutator lock),
  //   the concurrent-copying GC can move objects from the "from" space to the "to" space.
  //   If an object is moved at that time and *before* SweepSystemWeaks is called,
  //   then we don't know whether the move has happened yet.
  //   Successive reads will then (incorrectly) look at the objects in the "from" space,
  //   which is a problem since the objects have already been forwarded and mutations
  //   would not be visible in the right space.
  //   Instead, use a GcRoot here, which will be automatically updated by the GC.
  //
  // Also, any reads should be protected by a read barrier to always give us the "to" space
  // address.
  using ValueType = GcRoot<mirror::Object>;
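
  // A minimal sketch of the point above (`value` is a hypothetical map entry, e.g. the result of
  // FindBoxedLambda()): reading through the GcRoot applies a read barrier, so we always observe
  // the "to" space address even while the concurrent-copying GC is moving objects.
  //
  //   ValueType value = ...;
  //   mirror::Object* boxed = value.Read();  // read barrier; never a stale "from" space pointer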

  // Attempt to look up the lambda in the map, or return null if it's not there yet.
  ValueType FindBoxedLambda(const ClosureType& closure) const
      SHARED_REQUIRES(Locks::lambda_table_lock_);

  // If the GC has come in and temporarily disallowed touching weaks, block until it is allowed.
  void BlockUntilWeaksAllowed()
      SHARED_REQUIRES(Locks::lambda_table_lock_);

  // The key is a raw pointer to the closure; the table is responsible for freeing the closure's
  // memory when its entry is removed from the map (or when the table itself is destroyed).
  using UnorderedMapKeyType = ClosureType;

  // EmptyFn implementation for art::HashMap
  struct EmptyFn {
    void MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& item) const
        NO_THREAD_SAFETY_ANALYSIS;  // SHARED_REQUIRES(Locks::mutator_lock_)

    bool IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const;
  };

  // HashFn implementation for art::HashMap
  struct HashFn {
    size_t operator()(const UnorderedMapKeyType& key) const
        NO_THREAD_SAFETY_ANALYSIS;  // SHARED_REQUIRES(Locks::mutator_lock_)
  };

  // EqualsFn implementation for art::HashMap
  struct EqualsFn {
    bool operator()(const UnorderedMapKeyType& lhs, const UnorderedMapKeyType& rhs) const
        NO_THREAD_SAFETY_ANALYSIS;  // SHARED_REQUIRES(Locks::mutator_lock_)
  };
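
  // The three functors above are only declared here; their definitions live out-of-line. A
  // plausible shape, shown purely to illustrate what art::HashMap expects (this is an assumption
  // for illustration, not the actual implementation):
  //
  //   bool EmptyFn::IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const {
  //     return item.first == nullptr;  // a null closure key would mark an empty slot
  //   }
  //
  // MakeEmpty() would then null out the key (and clear the GcRoot), while HashFn/EqualsFn would
  // hash and compare the closures themselves, so boxing the same closure again finds the
  // existing entry.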

  using UnorderedMap = art::HashMap<UnorderedMapKeyType,
                                    ValueType,
                                    EmptyFn,
                                    HashFn,
                                    EqualsFn,
                                    TrackingAllocator<std::pair<ClosureType, ValueType>,
                                                      kAllocatorTagLambdaBoxTable>>;

  UnorderedMap map_ GUARDED_BY(Locks::lambda_table_lock_);
  bool allow_new_weaks_ GUARDED_BY(Locks::lambda_table_lock_);
  ConditionVariable new_weaks_condition_ GUARDED_BY(Locks::lambda_table_lock_);

  // Shrink the map when we get below this load factor.
  // (This is an arbitrary value that should be small enough to prevent aggressive map erases
  // from shrinking the table too often.)
  static constexpr double kMinimumLoadFactor = UnorderedMap::kDefaultMinLoadFactor / 2;
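  // For example, if UnorderedMap::kDefaultMinLoadFactor were 0.4 (an illustrative value, not
  // verified here), kMinimumLoadFactor would be 0.2: the table is only shrunk once fewer than
  // 20% of its slots are occupied after erases.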

  DISALLOW_COPY_AND_ASSIGN(BoxTable);
};

}  // namespace lambda
}  // namespace art

#endif  // ART_RUNTIME_LAMBDA_BOX_TABLE_H_