/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef ART_RUNTIME_LAMBDA_BOX_TABLE_H_
#define ART_RUNTIME_LAMBDA_BOX_TABLE_H_

#include "base/allocator.h"
#include "base/hash_map.h"
#include "gc_root.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "object_callbacks.h"

#include <stdint.h>

namespace art {

class ArtMethod;  // forward declaration

namespace mirror {
class Object;  // forward declaration
}  // namespace mirror

namespace lambda {

/*
 * Store a table of boxed lambdas. This is required to maintain object referential equality
 * when a lambda is re-boxed.
 *
 * Conceptually, we store a mapping of Closures -> Weak Reference<Boxed Lambda Object>.
 * When too many objects get GC'd, we shrink the underlying table to use less space.
 */
class BoxTable FINAL {
 public:
  using ClosureType = art::ArtMethod*;

  // Boxes a closure into an object. Returns null and throws an exception on failure.
  mirror::Object* BoxLambda(const ClosureType& closure)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      LOCKS_EXCLUDED(Locks::lambda_table_lock_);

  // Unboxes an object back into the lambda. Returns false and throws an exception on failure.
  bool UnboxLambda(mirror::Object* object, ClosureType* out_closure)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

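  // Illustrative usage sketch only (not part of this header's API); assumes a BoxTable*
  // box_table obtained from the runtime and the mutator lock held as a reader:
  //
  //   ClosureType closure = ...;  // ArtMethod* backing a create-lambda.
  //   mirror::Object* boxed = box_table->BoxLambda(closure);
  //   if (boxed == nullptr) { /* an exception is pending */ }
  //
  //   // Re-boxing the same closure returns the same object (referential equality).
  //   CHECK_EQ(boxed, box_table->BoxLambda(closure));
  //
  //   ClosureType unboxed;
  //   if (box_table->UnboxLambda(boxed, &unboxed)) {
  //     CHECK_EQ(closure, unboxed);
  //   }
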
  // Sweep weak references to lambda boxes. Update the addresses if the objects have been
  // moved, and delete them from the table if the objects have been cleaned up.
  void SweepWeakBoxedLambdas(IsMarkedVisitor* visitor)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      LOCKS_EXCLUDED(Locks::lambda_table_lock_);

  // GC callback: Temporarily block anyone from touching the map.
  void DisallowNewWeakBoxedLambdas()
      LOCKS_EXCLUDED(Locks::lambda_table_lock_);

  // GC callback: Unblock any readers who have been queued waiting to touch the map.
  void AllowNewWeakBoxedLambdas()
      LOCKS_EXCLUDED(Locks::lambda_table_lock_);

  // GC callback: Verify that the state is now blocking anyone from touching the map.
  void EnsureNewWeakBoxedLambdasDisallowed()
      LOCKS_EXCLUDED(Locks::lambda_table_lock_);

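  // Sketch of the expected calling pattern around a GC pass (illustrative only; the real
  // callers are runtime/GC internals, and the variable names below are just for the example):
  //
  //   box_table->DisallowNewWeakBoxedLambdas();         // stop new access to the weak table.
  //   box_table->EnsureNewWeakBoxedLambdasDisallowed();  // sanity-check the blocked state.
  //   /* ... GC marks and possibly moves objects ... */
  //   box_table->SweepWeakBoxedLambdas(visitor);         // fix up moved boxes, drop dead ones.
  //   box_table->AllowNewWeakBoxedLambdas();             // wake readers blocked in BoxLambda.
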
  BoxTable();
  ~BoxTable() = default;

 private:
  // Note: The value must be stored as a GcRoot, not a raw mirror::Object*.
  // Explanation:
  // - After all threads are suspended (exclusive mutator lock),
  //   the concurrent-copying GC can move objects from the "from" space to the "to" space.
  //   If an object is moved at that time, and *before* SweepSystemWeaks is called,
  //   we don't know whether the move has happened yet.
  //   Subsequent reads would then (incorrectly) look at the object in the "from" space,
  //   which is a problem since the object has already been forwarded and mutations
  //   would not be visible in the right space.
  //   Using a GcRoot here lets the GC update the reference automatically.
  //
  // Also, any read must go through a read barrier so it always yields the "to" space address.
  using ValueType = GcRoot<mirror::Object>;

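  // Sketch of how a stored value is read back (illustrative; the GcRoot read applies the
  // read barrier so the caller always observes the "to" space address):
  //
  //   ValueType value = FindBoxedLambda(closure);
  //   if (!value.IsNull()) {
  //     mirror::Object* boxed = value.Read();
  //   }
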
  // Attempt to look up the lambda in the map, or return null if it's not there yet.
  ValueType FindBoxedLambda(const ClosureType& closure) const
      SHARED_LOCKS_REQUIRED(Locks::lambda_table_lock_);

  // If the GC has come in and temporarily disallowed touching weaks, block until it is allowed.
  void BlockUntilWeaksAllowed()
      SHARED_LOCKS_REQUIRED(Locks::lambda_table_lock_);

  // EmptyFn implementation for art::HashMap
  struct EmptyFn {
    void MakeEmpty(std::pair<ClosureType, ValueType>& item) const {
      item.first = nullptr;
    }
    bool IsEmpty(const std::pair<ClosureType, ValueType>& item) const {
      return item.first == nullptr;
    }
  };

  // HashFn implementation for art::HashMap
  struct HashFn {
    size_t operator()(const ClosureType& key) const {
      // TODO(iam): Rewrite hash function when ClosureType is no longer an ArtMethod*
      return static_cast<size_t>(reinterpret_cast<uintptr_t>(key));
    }
  };

  // EqualsFn implementation for art::HashMap
  struct EqualsFn {
    bool operator()(const ClosureType& lhs, const ClosureType& rhs) const;
  };

  using UnorderedMap = art::HashMap<ClosureType,
                                    ValueType,
                                    EmptyFn,
                                    HashFn,
                                    EqualsFn,
                                    TrackingAllocator<std::pair<ClosureType, ValueType>,
                                                      kAllocatorTagLambdaBoxTable>>;

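  // Illustrative sketch only (not real lookup code) of what the functors above provide to
  // art::HashMap; EqualsFn's definition lives in the .cc file and is assumed to compare the
  // underlying ArtMethod* pointers:
  //
  //   std::pair<ClosureType, ValueType>& slot = ...;  // some slot in the backing storage.
  //   EmptyFn().MakeEmpty(slot);         // marks the slot unused by nulling the closure.
  //   EmptyFn().IsEmpty(slot);           // now true: a null closure means "empty".
  //
  //   ClosureType closure = ...;         // some ArtMethod* acting as the closure.
  //   size_t hash = HashFn()(closure);   // the pointer value itself is used as the hash.
  //   bool same = EqualsFn()(closure, closure);  // expected true for identical pointers.
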
  UnorderedMap map_ GUARDED_BY(Locks::lambda_table_lock_);
  bool allow_new_weaks_ GUARDED_BY(Locks::lambda_table_lock_);
  ConditionVariable new_weaks_condition_ GUARDED_BY(Locks::lambda_table_lock_);

  // Shrink the map when we get below this load factor.
  // (This is an arbitrary value that should be large enough to prevent aggressive map erases
  // from shrinking the table too often.)
  static constexpr double kMinimumLoadFactor = UnorderedMap::kDefaultMinLoadFactor / 2;

  DISALLOW_COPY_AND_ASSIGN(BoxTable);
};

}  // namespace lambda
}  // namespace art

#endif  // ART_RUNTIME_LAMBDA_BOX_TABLE_H_