/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "lambda/box_table.h"

#include "base/mutex.h"
#include "common_throws.h"
#include "gc_root-inl.h"
#include "lambda/box_class_table.h"
#include "lambda/closure.h"
#include "lambda/leaking_allocator.h"
#include "mirror/lambda_proxy.h"
#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "thread.h"

#include <vector>

namespace art {
namespace lambda {
// All closures are boxed into a subtype of LambdaProxy which implements the lambda's interface.
using BoxedClosurePointerType = mirror::LambdaProxy*;

// Returns the base class for all boxed closures.
// Note that concrete closure boxes are actually a subtype of mirror::LambdaProxy.
static mirror::Class* GetBoxedClosureBaseClass() SHARED_REQUIRES(Locks::mutator_lock_) {
  return Runtime::Current()->GetClassLinker()->GetClassRoot(ClassLinker::kJavaLangLambdaProxy);
}

namespace {
  // Convenience functions for allocating and deleting box table copies of the closures.
  struct ClosureAllocator {
    // Deletes a Closure that was allocated through ::Allocate.
    static void Delete(Closure* ptr) {
      delete[] reinterpret_cast<char*>(ptr);
    }

    // Returns a well-aligned pointer to a newly allocated Closure on the 'new' heap.
    static Closure* Allocate(size_t size) {
      DCHECK_GE(size, sizeof(Closure));

      // TODO: Maybe point to the interior of the boxed closure object after we add proxy support?
      Closure* closure = reinterpret_cast<Closure*>(new char[size]);
      DCHECK_ALIGNED(closure, alignof(Closure));
      return closure;
    }
  };

  struct DeleterForClosure {
    void operator()(Closure* closure) const {
      ClosureAllocator::Delete(closure);
    }
  };

  using UniqueClosurePtr = std::unique_ptr<Closure, DeleterForClosure>;
}  // namespace

BoxTable::BoxTable()
    : allow_new_weaks_(true),
      new_weaks_condition_("lambda box table allowed weaks", *Locks::lambda_table_lock_) {}

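// The box table owns the Closure copies it uses as map keys; release them together with the table.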
BoxTable::~BoxTable() {
  // Free all the copies of our closures.
  for (auto map_iterator = map_.begin(); map_iterator != map_.end(); ) {
    std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;

    Closure* closure = key_value_pair.first;

    // Remove from the map first, so that it doesn't try to access a dangling pointer.
    map_iterator = map_.Erase(map_iterator);

    // Safe to delete, no dangling pointers.
    ClosureAllocator::Delete(closure);
  }
}

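// Boxes 'closure' into a managed object whose class is the lambda proxy class named 'class_name'
// (resolved through 'class_loader'). Boxing an equal closure again returns the previously created
// box so that reference equality is preserved. On allocation failure, returns null and leaves the
// pending exception in place.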
mirror::Object* BoxTable::BoxLambda(const ClosureType& closure,
                                    const char* class_name,
                                    mirror::ClassLoader* class_loader) {
  Thread* self = Thread::Current();

  {
    // TODO: Switch to ReaderMutexLock if ConditionVariable ever supports RW Mutexes
    /*Reader*/MutexLock mu(self, *Locks::lambda_table_lock_);
    BlockUntilWeaksAllowed();

    // Attempt to look up this object; it's possible it was already boxed previously.
    // If this is the case we *must* return the same object as before to maintain
    // referential equality.
    //
    // In managed code:
    //   Functional f = () -> 5;  // vF = create-lambda
    //   Object a = f;            // vA = box-lambda vA
    //   Object b = f;            // vB = box-lambda vB
    //   assert(a == b)
    ValueType value = FindBoxedLambda(closure);
    if (!value.IsNull()) {
      return value.Read();
    }

    // Otherwise we need to box ourselves and insert it into the hash map.
  }

  // Convert the Closure into a managed object instance whose supertype is java.lang.LambdaProxy.

  // TODO: Boxing a learned lambda (i.e. made with unbox-lambda) should return the original object
  StackHandleScope<2> hs{self};  // NOLINT: [readability/braces] [4]

  Handle<mirror::ClassLoader> class_loader_handle = hs.NewHandle(class_loader);

  // Release the lambda table lock here, so that thread suspension is allowed.
  self->AllowThreadSuspension();

  lambda::BoxClassTable* lambda_box_class_table;

  // Find the lambda box class table; it lives in the system class loader when 'class_loader' is null.
  if (class_loader == nullptr) {
    ScopedObjectAccessUnchecked soa(self);
    mirror::ClassLoader* system_class_loader =
        soa.Decode<mirror::ClassLoader*>(Runtime::Current()->GetSystemClassLoader());
    lambda_box_class_table = system_class_loader->GetLambdaProxyCache();
  } else {
    lambda_box_class_table = class_loader_handle->GetLambdaProxyCache();
    // OK: can't be deleted while we hold a handle to the class loader.
  }
  DCHECK(lambda_box_class_table != nullptr);

  Handle<mirror::Class> closure_class(hs.NewHandle(
      lambda_box_class_table->GetOrCreateBoxClass(class_name, class_loader_handle)));
  if (UNLIKELY(closure_class.Get() == nullptr)) {
    // Most likely an OOM has occurred.
    self->AssertPendingException();
    return nullptr;
  }

  BoxedClosurePointerType closure_as_object = nullptr;
  UniqueClosurePtr closure_table_copy;
  // Create an instance of the class, and assign the pointer to the closure into it.
  {
    closure_as_object = down_cast<BoxedClosurePointerType>(closure_class->AllocObject(self));
    if (UNLIKELY(closure_as_object == nullptr)) {
      self->AssertPendingOOMException();
      return nullptr;
    }

    // Make a copy of the closure that we will store in the hash map.
    // The proxy instance will also point to this same copy (the key of the hash map entry).
    // Note that the closure pointer is cleaned up only after the proxy is GCd.
    closure_table_copy.reset(ClosureAllocator::Allocate(closure->GetSize()));
    closure_as_object->SetClosure(closure_table_copy.get());
  }

  // There are no thread suspension points after this, so we don't need to put it into a handle.
  ScopedAssertNoThreadSuspension soants{self,  // NOLINT: [whitespace/braces] [5]
      "box lambda table - box lambda - no more suspensions"};  // NOLINT: [whitespace/braces] [5]

  // Write the raw closure data into the proxy instance's copy of the closure.
  closure->CopyTo(closure_table_copy.get(), closure->GetSize());

  // The closure has been successfully boxed into an object; now insert it into the hash map.
  {
    MutexLock mu(self, *Locks::lambda_table_lock_);
    BlockUntilWeaksAllowed();

    // Look up the object again; it's possible another thread already boxed it while
    // we were allocating the object above.
    ValueType value = FindBoxedLambda(closure);
    if (UNLIKELY(!value.IsNull())) {
      // Let the GC clean up closure_as_object at a later time.
      // (We will not see this object when sweeping, it wasn't inserted yet.)
      closure_as_object->SetClosure(nullptr);
      return value.Read();
    }

    // Otherwise we need to insert it into the hash map in this thread.

    // The closure_table_copy is deleted by us manually when we erase it from the map.

    // Actually insert into the table.
    map_.Insert({closure_table_copy.release(), ValueType(closure_as_object)});
  }

  return closure_as_object;
}

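// Unboxes a previously boxed lambda: verifies that 'object' is a LambdaProxy (throwing
// ClassCastException otherwise), then copies its interior closure into a leaked allocation and
// stores that copy in *out_closure. Returns true on success, false if the cast check fails.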
bool BoxTable::UnboxLambda(mirror::Object* object, ClosureType* out_closure) {
  DCHECK(object != nullptr);
  *out_closure = nullptr;

  Thread* self = Thread::Current();

  // Note that we do not need to access lambda_table_lock_ here
  // since we don't need to look at the map.

  mirror::Object* boxed_closure_object = object;

  // Raise ClassCastException if object is not instanceof LambdaProxy.
  if (UNLIKELY(!boxed_closure_object->InstanceOf(GetBoxedClosureBaseClass()))) {
    ThrowClassCastException(GetBoxedClosureBaseClass(), boxed_closure_object->GetClass());
    return false;
  }

  // TODO(iam): We must check that the closure object extends/implements the type
  // specified in [type id]. This is not currently implemented since the type id is unavailable.

  // If we got this far, the inputs are valid.
  // Shuffle the java.lang.LambdaProxy back into a raw closure, then allocate a copy of it
  // and return that copy.
  BoxedClosurePointerType boxed_closure =
      down_cast<BoxedClosurePointerType>(boxed_closure_object);

  DCHECK_ALIGNED(boxed_closure->GetClosure(), alignof(Closure));
  const Closure* aligned_interior_closure = boxed_closure->GetClosure();
  DCHECK(aligned_interior_closure != nullptr);

  // TODO: we probably don't need to make a copy here later on, once there's GC support.

  // Allocate a copy that can "escape" and copy the closure data into that.
  Closure* unboxed_closure =
      LeakingAllocator::MakeFlexibleInstance<Closure>(self, aligned_interior_closure->GetSize());
  DCHECK_ALIGNED(unboxed_closure, alignof(Closure));
  // TODO: don't just memcpy the closure, it's unsafe when we add references to the mix.
  memcpy(unboxed_closure, aligned_interior_closure, aligned_interior_closure->GetSize());

  DCHECK_EQ(unboxed_closure->GetSize(), aligned_interior_closure->GetSize());

  *out_closure = unboxed_closure;
  return true;
}

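// Returns the GC root holding the box for 'closure', or a null ValueType if it has not been boxed
// yet. Both call sites in this file invoke this while holding lambda_table_lock_.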
BoxTable::ValueType BoxTable::FindBoxedLambda(const ClosureType& closure) const {
  auto map_iterator = map_.Find(closure);
  if (map_iterator != map_.end()) {
    const std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;
    const ValueType& value = key_value_pair.second;

    DCHECK(!value.IsNull());  // Never store null boxes.
    return value;
  }

  return ValueType(nullptr);
}

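// Waits until weak root access is permitted again: gated by allow_new_weaks_ when read barriers
// are disabled, and by the thread-local weak-ref-access flag when they are enabled.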
void BoxTable::BlockUntilWeaksAllowed() {
  Thread* self = Thread::Current();
  while (UNLIKELY((!kUseReadBarrier && !allow_new_weaks_) ||
                  (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
    new_weaks_condition_.WaitHoldingLocks(self);  // wait while holding mutator lock
  }
}

void BoxTable::SweepWeakBoxedLambdas(IsMarkedVisitor* visitor) {
  DCHECK(visitor != nullptr);

  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::lambda_table_lock_);

  /*
   * Visit every weak root in our lambda box table.
   * Remove unmarked objects, update marked objects to new address.
   */
  std::vector<ClosureType> remove_list;
  for (auto map_iterator = map_.begin(); map_iterator != map_.end(); ) {
    std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;

    const ValueType& old_value = key_value_pair.second;

    // This does not need a read barrier because this is called by GC.
    mirror::Object* old_value_raw = old_value.Read<kWithoutReadBarrier>();
    mirror::Object* new_value = visitor->IsMarked(old_value_raw);

    if (new_value == nullptr) {
      // The object has been swept away.
      Closure* closure = key_value_pair.first;

      // Delete the entry from the map.
      // (Remove from map first to avoid accessing a dangling pointer.)
      map_iterator = map_.Erase(map_iterator);

      // Clean up the memory by deleting the closure.
      ClosureAllocator::Delete(closure);
    } else {
      // The object has been moved.
      // Update the map.
      key_value_pair.second = ValueType(new_value);
      ++map_iterator;
    }
  }

  // Occasionally shrink the map to avoid growing very large.
  if (map_.CalculateLoadFactor() < kMinimumLoadFactor) {
    map_.ShrinkToMaximumLoad();
  }
}

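// The next three methods implement the usual ART weak-table protocol: collectors without read
// barriers bracket sweeping with Disallow/Allow, while read-barrier configurations only need a
// broadcast once weak reference access is re-enabled.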
void BoxTable::DisallowNewWeakBoxedLambdas() {
  CHECK(!kUseReadBarrier);
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::lambda_table_lock_);

  allow_new_weaks_ = false;
}

void BoxTable::AllowNewWeakBoxedLambdas() {
  CHECK(!kUseReadBarrier);
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::lambda_table_lock_);

  allow_new_weaks_ = true;
  new_weaks_condition_.Broadcast(self);
}

void BoxTable::BroadcastForNewWeakBoxedLambdas() {
  CHECK(kUseReadBarrier);
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::lambda_table_lock_);
  new_weaks_condition_.Broadcast(self);
}

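// HashMap plumbing for map_: EmptyFn marks and detects empty slots, EqualsFn compares closure keys
// via Closure::ReferenceEquals, and HashFn hashes a closure's contents; the mutator lock assertions
// guard the accesses to closure data and GC roots.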
void BoxTable::EmptyFn::MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& item) const {
  item.first = nullptr;

  Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  item.second = ValueType();  // Also clear the GC root.
}

bool BoxTable::EmptyFn::IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const {
  bool is_empty = item.first == nullptr;
  DCHECK_EQ(item.second.IsNull(), is_empty);

  return is_empty;
}

bool BoxTable::EqualsFn::operator()(const UnorderedMapKeyType& lhs,
                                    const UnorderedMapKeyType& rhs) const {
  // Nothing needs this right now, but leave this assertion for later when
  // we need to look at the references inside of the closure.
  Locks::mutator_lock_->AssertSharedHeld(Thread::Current());

  return lhs->ReferenceEquals(rhs);
}

size_t BoxTable::HashFn::operator()(const UnorderedMapKeyType& key) const {
  const lambda::Closure* closure = key;
  DCHECK_ALIGNED(closure, alignof(lambda::Closure));

  // Need to hold mutator_lock_ before calling into Closure::GetHashCode.
  Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  return closure->GetHashCode();
}

}  // namespace lambda
}  // namespace art