/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "lambda/box_table.h"

#include "base/mutex.h"
#include "common_throws.h"
#include "gc_root-inl.h"
#include "lambda/closure.h"
#include "lambda/leaking_allocator.h"
#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "thread.h"

#include <vector>

namespace art {
namespace lambda {
// Temporarily represent the lambda Closure as its raw bytes in an array.
// TODO: Generate a proxy class for the closure when boxing the first time.
using BoxedClosurePointerType = mirror::ByteArray*;

static mirror::Class* GetBoxedClosureClass() SHARED_REQUIRES(Locks::mutator_lock_) {
  return mirror::ByteArray::GetArrayClass();
}

namespace {
  // Convenience functions for allocating/deleting box table copies of the closures.
  struct ClosureAllocator {
    // Deletes a Closure that was allocated through ::Allocate.
    static void Delete(Closure* ptr) {
      delete[] reinterpret_cast<char*>(ptr);
    }

    // Returns a well-aligned pointer to a newly allocated Closure on the 'new' heap.
    static Closure* Allocate(size_t size) {
      DCHECK_GE(size, sizeof(Closure));

      // TODO: Maybe point to the interior of the boxed closure object after we add proxy support?
      Closure* closure = reinterpret_cast<Closure*>(new char[size]);
      DCHECK_ALIGNED(closure, alignof(Closure));
      return closure;
    }
  };
}  // namespace

BoxTable::BoxTable()
  : allow_new_weaks_(true),
    new_weaks_condition_("lambda box table allowed weaks", *Locks::lambda_table_lock_) {}

BoxTable::~BoxTable() {
  // Free all the copies of our closures.
  for (auto map_iterator = map_.begin(); map_iterator != map_.end(); ) {
    std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;

    Closure* closure = key_value_pair.first;

    // Remove from the map first, so that it doesn't try to access a dangling pointer.
    map_iterator = map_.Erase(map_iterator);

    // Safe to delete, no dangling pointers.
    ClosureAllocator::Delete(closure);
  }
}

mirror::Object* BoxTable::BoxLambda(const ClosureType& closure) {
  Thread* self = Thread::Current();

  {
    // TODO: Switch to ReaderMutexLock if ConditionVariable ever supports RW Mutexes
    /*Reader*/MutexLock mu(self, *Locks::lambda_table_lock_);
    BlockUntilWeaksAllowed();

    // Attempt to look up this object, it's possible it was already boxed previously.
    // If this is the case we *must* return the same object as before to maintain
    // referential equality.
    //
    // In managed code:
    //   Functional f = () -> 5;  // vF = create-lambda
    //   Object a = f;            // vA = box-lambda vA
    //   Object b = f;            // vB = box-lambda vB
    //   assert(a == b)
    ValueType value = FindBoxedLambda(closure);
    if (!value.IsNull()) {
      return value.Read();
    }

    // Otherwise we need to box ourselves and insert it into the hash map.
  }

  // Release the lambda table lock here, so that thread suspension is allowed.

  // Convert the Closure into a managed byte[] which will serve
  // as the temporary 'boxed' version of the lambda. This is good enough
  // to check all the basic object identities that a boxed lambda must retain.
  // It's also good enough to contain all the captured primitive variables.

  // TODO: Boxing an innate lambda (i.e. made with create-lambda) should make a proxy class.
  // TODO: Boxing a learned lambda (i.e. made with unbox-lambda) should return the original object.
  BoxedClosurePointerType closure_as_array_object =
      mirror::ByteArray::Alloc(self, closure->GetSize());

  // There are no thread suspension points after this, so we don't need to put it into a handle.

  if (UNLIKELY(closure_as_array_object == nullptr)) {
    // Most likely an OOM has occurred.
    CHECK(self->IsExceptionPending());
    return nullptr;
  }

  // Write the raw closure data into the byte[].
  closure->CopyTo(closure_as_array_object->GetRawData(sizeof(uint8_t),  // component size
                                                      0 /*index*/),
                  closure_as_array_object->GetLength());

  // The closure has been successfully boxed into an object, now insert it into the hash map.
  {
    MutexLock mu(self, *Locks::lambda_table_lock_);
    BlockUntilWeaksAllowed();

    // Look up the object again, it's possible another thread already boxed it while
    // we were allocating the object before.
    ValueType value = FindBoxedLambda(closure);
    if (UNLIKELY(!value.IsNull())) {
      // Let the GC clean up closure_as_array_object at a later time.
      return value.Read();
    }

    // Otherwise we need to insert it into the hash map in this thread.

    // Make a copy for the box table to keep, in case the closure gets collected from the stack.
    // TODO: GC may need to sweep for roots in the box table's copy of the closure.
    Closure* closure_table_copy = ClosureAllocator::Allocate(closure->GetSize());
    closure->CopyTo(closure_table_copy, closure->GetSize());

    // The closure_table_copy needs to be deleted by us manually when we erase it from the map.

    // Actually insert into the table.
    map_.Insert({closure_table_copy, ValueType(closure_as_array_object)});
  }

  return closure_as_array_object;
}

bool BoxTable::UnboxLambda(mirror::Object* object, ClosureType* out_closure) {
  DCHECK(object != nullptr);
  *out_closure = nullptr;

  Thread* self = Thread::Current();

  // Note that we do not need to access lambda_table_lock_ here
  // since we don't need to look at the map.

  mirror::Object* boxed_closure_object = object;

  // Raise ClassCastException if object is not instanceof byte[]
  if (UNLIKELY(!boxed_closure_object->InstanceOf(GetBoxedClosureClass()))) {
    ThrowClassCastException(GetBoxedClosureClass(), boxed_closure_object->GetClass());
    return false;
  }

  // TODO(iam): We must check that the closure object extends/implements the type
  // specified in [type id]. This is not currently implemented since it's always a byte[].

  // If we got this far, the inputs are valid.
  // Shuffle the byte[] back into a raw closure, then allocate it, copy, and return it.
  BoxedClosurePointerType boxed_closure_as_array =
      down_cast<BoxedClosurePointerType>(boxed_closure_object);

  const int8_t* unaligned_interior_closure = boxed_closure_as_array->GetData();

  // Allocate a copy that can "escape" and copy the closure data into that.
  Closure* unboxed_closure =
      LeakingAllocator::MakeFlexibleInstance<Closure>(self, boxed_closure_as_array->GetLength());
  // TODO: don't just memcpy the closure, it's unsafe when we add references to the mix.
  memcpy(unboxed_closure, unaligned_interior_closure, boxed_closure_as_array->GetLength());

  DCHECK_EQ(unboxed_closure->GetSize(), static_cast<size_t>(boxed_closure_as_array->GetLength()));

  *out_closure = unboxed_closure;
  return true;
}

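// Returns the boxed object previously stored for this closure,
// or a null ValueType if the closure has not been boxed yet.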
BoxTable::ValueType BoxTable::FindBoxedLambda(const ClosureType& closure) const {
  auto map_iterator = map_.Find(closure);
  if (map_iterator != map_.end()) {
    const std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;
    const ValueType& value = key_value_pair.second;

    DCHECK(!value.IsNull());  // Never store null boxes.
    return value;
  }

  return ValueType(nullptr);
}

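// Blocks the caller until weak-reference access is allowed again: either allow_new_weaks_
// has been set (non-read-barrier collectors) or the GC has re-enabled weak ref access for
// this thread (read-barrier collectors).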
void BoxTable::BlockUntilWeaksAllowed() {
  Thread* self = Thread::Current();
  while (UNLIKELY((!kUseReadBarrier && !allow_new_weaks_) ||
                  (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
    new_weaks_condition_.WaitHoldingLocks(self);  // wait while holding mutator lock
  }
}

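// Called by the GC to process this table's weak roots: entries whose boxed object was not
// marked are erased (and their closure copy freed); entries whose object moved are updated
// to the new address.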
void BoxTable::SweepWeakBoxedLambdas(IsMarkedVisitor* visitor) {
  DCHECK(visitor != nullptr);

  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::lambda_table_lock_);

  /*
   * Visit every weak root in our lambda box table.
   * Remove unmarked objects, update marked objects to new address.
   */
  std::vector<ClosureType> remove_list;
  for (auto map_iterator = map_.begin(); map_iterator != map_.end(); ) {
    std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;

    const ValueType& old_value = key_value_pair.second;

    // This does not need a read barrier because this is called by GC.
    mirror::Object* old_value_raw = old_value.Read<kWithoutReadBarrier>();
    mirror::Object* new_value = visitor->IsMarked(old_value_raw);

    if (new_value == nullptr) {
      // The object has been swept away.
      const ClosureType& closure = key_value_pair.first;

      // Delete the entry from the map.
      map_iterator = map_.Erase(map_iterator);

      // Clean up the memory by deleting the closure.
      ClosureAllocator::Delete(closure);
    } else {
      // The object has been moved.
      // Update the map.
      key_value_pair.second = ValueType(new_value);
      ++map_iterator;
    }
  }

  // Occasionally shrink the map to avoid growing very large.
  if (map_.CalculateLoadFactor() < kMinimumLoadFactor) {
    map_.ShrinkToMaximumLoad();
  }
}

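// Stops BoxLambda from reading or creating weak table entries until AllowNewWeakBoxedLambdas()
// is called (used by non-read-barrier collectors while they process references).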
void BoxTable::DisallowNewWeakBoxedLambdas() {
  CHECK(!kUseReadBarrier);
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::lambda_table_lock_);

  allow_new_weaks_ = false;
}

void BoxTable::AllowNewWeakBoxedLambdas() {
  CHECK(!kUseReadBarrier);
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::lambda_table_lock_);

  allow_new_weaks_ = true;
  new_weaks_condition_.Broadcast(self);
}

void BoxTable::BroadcastForNewWeakBoxedLambdas() {
  CHECK(kUseReadBarrier);
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::lambda_table_lock_);
  new_weaks_condition_.Broadcast(self);
}

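// HashMap plumbing: an entry whose closure key is null marks an empty slot in the table.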
void BoxTable::EmptyFn::MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& item) const {
  item.first = nullptr;

  Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  item.second = ValueType();  // Also clear the GC root.
}

bool BoxTable::EmptyFn::IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const {
  return item.first == nullptr;
}

bool BoxTable::EqualsFn::operator()(const UnorderedMapKeyType& lhs,
                                    const UnorderedMapKeyType& rhs) const {
  // Nothing needs this right now, but leave this assertion for later when
  // we need to look at the references inside of the closure.
  Locks::mutator_lock_->AssertSharedHeld(Thread::Current());

  return lhs->ReferenceEquals(rhs);
}

size_t BoxTable::HashFn::operator()(const UnorderedMapKeyType& key) const {
  const lambda::Closure* closure = key;
  DCHECK_ALIGNED(closure, alignof(lambda::Closure));

  // Need to hold mutator_lock_ before calling into Closure::GetHashCode.
  Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  return closure->GetHashCode();
}

}  // namespace lambda
}  // namespace art