/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16#include "lambda/box_table.h"
17
18#include "base/mutex.h"
19#include "common_throws.h"
20#include "gc_root-inl.h"
21#include "mirror/method.h"
22#include "mirror/object-inl.h"
23#include "thread.h"
24
25#include <vector>
26
27namespace art {
28namespace lambda {
29
BoxTable::BoxTable()
  : allow_new_weaks_(true),
    new_weaks_condition_("lambda box table allowed weaks", *Locks::lambda_table_lock_) {}

mirror::Object* BoxTable::BoxLambda(const ClosureType& closure) {
  Thread* self = Thread::Current();

  {
    // TODO: Switch to ReaderMutexLock if ConditionVariable ever supports RW Mutexes
    /*Reader*/MutexLock mu(self, *Locks::lambda_table_lock_);
    BlockUntilWeaksAllowed();

    // Attempt to look up this closure; it's possible it was already boxed previously.
    // If that is the case we *must* return the same object as before to maintain
    // referential equality.
    //
    // In managed code:
    //   Functional f = () -> 5;  // vF = create-lambda
    //   Object a = f;            // vA = box-lambda vF
    //   Object b = f;            // vB = box-lambda vF
    //   assert(a == b)
    ValueType value = FindBoxedLambda(closure);
    if (!value.IsNull()) {
      return value.Read();
    }

    // Otherwise we need to box the closure ourselves and insert it into the hash map.
  }

  // Release the lambda table lock here, so that thread suspension is allowed.

  // Convert the ArtMethod into a java.lang.reflect.Method, which will serve
  // as the temporary 'boxed' version of the lambda. This is good enough
  // to check all the basic object identities that a boxed lambda must retain.

  // TODO: Boxing an innate lambda (i.e. made with create-lambda) should make a proxy class
  // TODO: Boxing a learned lambda (i.e. made with unbox-lambda) should return the original object
  mirror::Method* method_as_object =
      mirror::Method::CreateFromArtMethod(self, closure);
  // There are no thread suspension points after this, so we don't need to put it into a handle.

  if (UNLIKELY(method_as_object == nullptr)) {
    // Most likely an OOM has occurred.
    CHECK(self->IsExceptionPending());
    return nullptr;
  }

  // The method has been successfully boxed into an object; now insert it into the hash map.
  {
    MutexLock mu(self, *Locks::lambda_table_lock_);
    BlockUntilWeaksAllowed();

    // Look up the object again; it's possible another thread boxed the same closure
    // while we were allocating the object without the lock held.
    ValueType value = FindBoxedLambda(closure);
    if (UNLIKELY(!value.IsNull())) {
      // Let the GC clean up method_as_object at a later time.
      return value.Read();
    }

    // Otherwise we should insert it into the hash map in this thread.
    map_.Insert(std::make_pair(closure, ValueType(method_as_object)));
  }

  return method_as_object;
}

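// Note on the locking in BoxLambda above: this is a double-checked pattern adapted
// to GC suspension points. CreateFromArtMethod can allocate (and therefore suspend),
// so the table lock is dropped around the allocation; the second FindBoxedLambda
// under the re-acquired lock catches the race where another thread boxed the same
// closure in the meantime, preserving referential equality at the cost of an
// occasionally wasted allocation.
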
bool BoxTable::UnboxLambda(mirror::Object* object, ClosureType* out_closure) {
  DCHECK(object != nullptr);
  *out_closure = nullptr;

  // Note that we do not need to access lambda_table_lock_ here
  // since we don't need to look at the map.

  mirror::Object* boxed_closure_object = object;

  // Raise ClassCastException if object is not instanceof java.lang.reflect.Method.
  if (UNLIKELY(!boxed_closure_object->InstanceOf(mirror::Method::StaticClass()))) {
    ThrowClassCastException(mirror::Method::StaticClass(), boxed_closure_object->GetClass());
    return false;
  }

  // TODO(iam): We must check that the closure object extends/implements the type
  // specified in [type id]. This is not currently implemented since it's always a Method.

  // If we got this far, the inputs are valid.
  // Write out the java.lang.reflect.Method's embedded ArtMethod* into the vreg target.
  mirror::AbstractMethod* boxed_closure_as_method =
      down_cast<mirror::AbstractMethod*>(boxed_closure_object);

  ArtMethod* unboxed_closure = boxed_closure_as_method->GetArtMethod();
  DCHECK(unboxed_closure != nullptr);

  *out_closure = unboxed_closure;
  return true;
}

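// A minimal usage sketch of the box/unbox round trip above (illustrative only;
// `box_table` and `closure` are assumed names for a live BoxTable* and a valid
// ArtMethod* closure on an attached runtime thread):
//
//   mirror::Object* boxed = box_table->BoxLambda(closure);
//   mirror::Object* boxed_again = box_table->BoxLambda(closure);
//   DCHECK_EQ(boxed, boxed_again);  // referential equality is preserved
//
//   BoxTable::ClosureType unboxed = nullptr;
//   if (box_table->UnboxLambda(boxed, &unboxed)) {
//     DCHECK_EQ(unboxed, closure);  // the original ArtMethod* is recovered
//   }
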
BoxTable::ValueType BoxTable::FindBoxedLambda(const ClosureType& closure) const {
  auto map_iterator = map_.Find(closure);
  if (map_iterator != map_.end()) {
    const std::pair<ClosureType, ValueType>& key_value_pair = *map_iterator;
    const ValueType& value = key_value_pair.second;

    DCHECK(!value.IsNull());  // Never store null boxes.
    return value;
  }

  return ValueType(nullptr);
}

void BoxTable::BlockUntilWeaksAllowed() {
  Thread* self = Thread::Current();
  while (UNLIKELY((!kUseReadBarrier && !allow_new_weaks_) ||
                  (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
    new_weaks_condition_.WaitHoldingLocks(self);  // wait while holding mutator lock
  }
}

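// Note: the wait condition above covers two collector configurations. Without a
// read barrier, weak access is gated by the table-wide allow_new_weaks_ flag
// toggled by Disallow/AllowNewWeakBoxedLambdas below; with a read barrier
// (concurrent copying collection), it is gated per-thread through
// Thread::GetWeakRefAccessEnabled(), and BroadcastForNewWeakBoxedLambdas only
// wakes waiters without touching any flag.
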
void BoxTable::SweepWeakBoxedLambdas(IsMarkedVisitor* visitor) {
  DCHECK(visitor != nullptr);

  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::lambda_table_lock_);

  /*
   * Visit every weak root in our lambda box table.
   * Remove unmarked objects and update marked objects to their new addresses.
   */
  for (auto map_iterator = map_.begin(); map_iterator != map_.end(); ) {
    std::pair<ClosureType, ValueType>& key_value_pair = *map_iterator;

    const ValueType& old_value = key_value_pair.second;

    // This does not need a read barrier because it is called by the GC.
    mirror::Object* old_value_raw = old_value.Read<kWithoutReadBarrier>();
    mirror::Object* new_value = visitor->IsMarked(old_value_raw);

    if (new_value == nullptr) {
      const ClosureType& closure = key_value_pair.first;
      // The object has been swept away.
      // Delete the entry from the map.
      map_iterator = map_.Erase(map_.Find(closure));
    } else {
      // The object has been moved.
      // Update the map.
      key_value_pair.second = ValueType(new_value);
      ++map_iterator;
    }
  }

  // Occasionally shrink the map to avoid growing very large.
  if (map_.CalculateLoadFactor() < kMinimumLoadFactor) {
    map_.ShrinkToMaximumLoad();
  }
}

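// Note on the sweep above: the IsMarkedVisitor contract is that IsMarked()
// returns null for an unreachable object and the (possibly relocated) address
// otherwise, so a non-null result both keeps the entry alive and fixes it up
// for moving collectors. The load-factor check afterwards keeps the table from
// staying oversized after a burst of boxed lambdas has died.
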
void BoxTable::DisallowNewWeakBoxedLambdas() {
  CHECK(!kUseReadBarrier);
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::lambda_table_lock_);

  allow_new_weaks_ = false;
}

void BoxTable::AllowNewWeakBoxedLambdas() {
  CHECK(!kUseReadBarrier);
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::lambda_table_lock_);

  allow_new_weaks_ = true;
  new_weaks_condition_.Broadcast(self);
}

void BoxTable::BroadcastForNewWeakBoxedLambdas() {
  CHECK(kUseReadBarrier);
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::lambda_table_lock_);
  new_weaks_condition_.Broadcast(self);
}

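// A hedged sketch of how a collector pause might drive the three entry points
// above (the sequencing is illustrative; the real call sites live in the
// runtime's GC code, not here):
//
//   table.DisallowNewWeakBoxedLambdas();   // non-read-barrier config: block new boxing
//   table.SweepWeakBoxedLambdas(visitor);  // drop dead boxes, fix up moved ones
//   table.AllowNewWeakBoxedLambdas();      // wake threads parked in BlockUntilWeaksAllowed()
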
bool BoxTable::EqualsFn::operator()(const ClosureType& lhs, const ClosureType& rhs) const {
  // Nothing needs this right now, but leave this assertion for later when
  // we need to look at the references inside of the closure.
  if (kIsDebugBuild) {
    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  }

  // TODO: Needs rework to use read barriers once closures have references inside them that
  // can move. Until then, it's safe to just compare the closure data directly.
  return lhs == rhs;
}

}  // namespace lambda
}  // namespace art