/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef ART_RUNTIME_LAMBDA_BOX_TABLE_H_
#define ART_RUNTIME_LAMBDA_BOX_TABLE_H_

#include "base/allocator.h"
#include "base/hash_map.h"
#include "gc_root.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "object_callbacks.h"

#include <stdint.h>

namespace art {

class ArtMethod;  // forward declaration

namespace mirror {
class Object;  // forward declaration
}  // namespace mirror

namespace lambda {

/*
 * Store a table of boxed lambdas. This is required to maintain object referential equality
 * when a lambda is re-boxed.
 *
 * Conceptually, we store a mapping of Closures -> Weak Reference<Boxed Lambda Object>.
 * When too many objects get GCd, we shrink the underlying table to use less space.
 */
class BoxTable FINAL {
 public:
  using ClosureType = art::ArtMethod*;

  // Boxes a closure into an object. Returns null and throws an exception on failure.
  mirror::Object* BoxLambda(const ClosureType& closure)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::lambda_table_lock_);

  // Unboxes an object back into the lambda. Returns false and throws an exception on failure.
  bool UnboxLambda(mirror::Object* object, ClosureType* out_closure)
      SHARED_REQUIRES(Locks::mutator_lock_);
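  //
  // Illustrative round-trip (comment-only sketch; the box_table pointer, the closure
  // value, and the surrounding error handling are assumptions, not part of this header):
  //
  //   mirror::Object* boxed = box_table->BoxLambda(closure);  // null + pending exception on failure
  //   // Re-boxing the same closure yields the same object while the box is still reachable,
  //   // i.e. box_table->BoxLambda(closure) == boxed.
  //   ClosureType unboxed;
  //   if (box_table->UnboxLambda(boxed, &unboxed)) {
  //     // unboxed == closure
  //   }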

  // Sweep weak references to lambda boxes. Update the addresses if the objects have been
  // moved, and delete them from the table if the objects have been cleaned up.
  void SweepWeakBoxedLambdas(IsMarkedVisitor* visitor)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::lambda_table_lock_);
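  //
  // A sketch of the sweep step (illustrative only; it assumes IsMarkedVisitor::IsMarked
  // returns the object's new address while it is still live and null once it has been
  // collected, and that entries iterate as std::pair<ClosureType, ValueType>):
  //
  //   for (auto& entry : map_) {
  //     mirror::Object* new_obj = visitor->IsMarked(entry.second.Read<kWithoutReadBarrier>());
  //     if (new_obj == nullptr) {
  //       // The boxed lambda was garbage collected: drop the entry.
  //     } else {
  //       entry.second = ValueType(new_obj);  // The object may have moved; store the new address.
  //     }
  //   }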

  // GC callback: Temporarily block anyone from touching the map.
  void DisallowNewWeakBoxedLambdas()
      REQUIRES(!Locks::lambda_table_lock_);

  // GC callback: Unblock any readers who have been queued waiting to touch the map.
  void AllowNewWeakBoxedLambdas()
      REQUIRES(!Locks::lambda_table_lock_);

  // GC callback: Verify that the state is now blocking anyone from touching the map.
  void EnsureNewWeakBoxedLambdasDisallowed()
      REQUIRES(!Locks::lambda_table_lock_);
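  //
  // Taken together, these follow ART's usual system-weak protocol (an illustrative
  // summary, not a requirement imposed by this header): the GC calls
  // DisallowNewWeakBoxedLambdas() before sweeping, so mutators that try to box a
  // lambda wait in BlockUntilWeaksAllowed(); once SweepWeakBoxedLambdas() has run,
  // the GC calls AllowNewWeakBoxedLambdas() and the waiters are signalled via
  // new_weaks_condition_.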

  BoxTable();
  ~BoxTable() = default;

 private:
  // FIXME: This needs to be a GcRoot.
  // Explanation:
  // - After all threads are suspended (exclusive mutator lock),
  //   the concurrent-copying GC can move objects from the "from" space to the "to" space.
  //   If an object is moved at that time and *before* SweepSystemWeaks is called, then
  //   we don't know if the move has happened yet.
  //   Successive reads will then (incorrectly) look at the objects in the "from" space,
  //   which is a problem since the objects have already been forwarded and mutations
  //   would not be visible in the right space.
  //   Instead, use a GcRoot here, which will be automatically updated by the GC.
  //
  // Also, any reads should be protected by a read barrier to always give us the "to" space address.
  using ValueType = GcRoot<mirror::Object>;
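  //
  // Illustrative read through the root (a sketch; GcRoot<>::Read() applies the read
  // barrier by default, so it hands back the "to" space address under concurrent copying):
  //
  //   ValueType root = FindBoxedLambda(closure);
  //   if (!root.IsNull()) {
  //     mirror::Object* boxed = root.Read();
  //   }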
91
92 // Attempt to look up the lambda in the map, or return null if it's not there yet.
93 ValueType FindBoxedLambda(const ClosureType& closure) const
Mathieu Chartier90443472015-07-16 20:32:27 -070094 SHARED_REQUIRES(Locks::lambda_table_lock_);
Igor Murashkine2facc52015-07-10 13:49:08 -070095
96 // If the GC has come in and temporarily disallowed touching weaks, block until is it allowed.
97 void BlockUntilWeaksAllowed()
Mathieu Chartier90443472015-07-16 20:32:27 -070098 SHARED_REQUIRES(Locks::lambda_table_lock_);

  // EmptyFn implementation for art::HashMap
  struct EmptyFn {
    void MakeEmpty(std::pair<ClosureType, ValueType>& item) const {
      item.first = nullptr;
    }
    bool IsEmpty(const std::pair<ClosureType, ValueType>& item) const {
      return item.first == nullptr;
    }
  };

  // HashFn implementation for art::HashMap
  struct HashFn {
    size_t operator()(const ClosureType& key) const {
      // TODO(iam): Rewrite hash function when ClosureType is no longer an ArtMethod*
      return static_cast<size_t>(reinterpret_cast<uintptr_t>(key));
    }
  };

  // EqualsFn implementation for art::HashMap
  struct EqualsFn {
    bool operator()(const ClosureType& lhs, const ClosureType& rhs) const;
  };

  using UnorderedMap = art::HashMap<ClosureType,
                                    ValueType,
                                    EmptyFn,
                                    HashFn,
                                    EqualsFn,
                                    TrackingAllocator<std::pair<ClosureType, ValueType>,
                                                      kAllocatorTagLambdaBoxTable>>;
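  //
  // The three functor structs above plug into art::HashMap's open-addressed table:
  // EmptyFn marks unused slots with a null closure, HashFn buckets on the ArtMethod*
  // bits, and EqualsFn compares keys. An illustrative use of the resulting type (a
  // sketch of what the implementation might do, not a quote of box_table.cc):
  //
  //   map_.Insert(std::make_pair(closure, ValueType(boxed_object)));
  //   auto it = map_.Find(closure);
  //   if (it != map_.end()) {
  //     // An existing box was found for this closure.
  //   }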
130
131 UnorderedMap map_ GUARDED_BY(Locks::lambda_table_lock_);
132 bool allow_new_weaks_ GUARDED_BY(Locks::lambda_table_lock_);
133 ConditionVariable new_weaks_condition_ GUARDED_BY(Locks::lambda_table_lock_);
134
135 // Shrink the map when we get below this load factor.
136 // (This is an arbitrary value that should be large enough to prevent aggressive map erases
137 // from shrinking the table too often.)
138 static constexpr double kMinimumLoadFactor = UnorderedMap::kDefaultMinLoadFactor / 2;
139
140 DISALLOW_COPY_AND_ASSIGN(BoxTable);
141};
142
143} // namespace lambda
144} // namespace art
145
146#endif // ART_RUNTIME_LAMBDA_BOX_TABLE_H_