Revert "Revert "lambda: Experimental support for capture-variable and liberate-variable""
This reverts commit 7bbb80ab52c203e44d2ded2c947b3b03b4b31ec4.
Change-Id: If806ce5c6c5e96fdb2c3761dee096f74e7e5b001
diff --git a/runtime/lambda/box_table.cc b/runtime/lambda/box_table.cc
index 26575fd..8eef10b 100644
--- a/runtime/lambda/box_table.cc
+++ b/runtime/lambda/box_table.cc
@@ -18,6 +18,8 @@
#include "base/mutex.h"
#include "common_throws.h"
#include "gc_root-inl.h"
+#include "lambda/closure.h"
+#include "lambda/leaking_allocator.h"
#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "thread.h"
@@ -26,11 +28,53 @@
namespace art {
namespace lambda {
+// Temporarily represent the lambda Closure as its raw bytes in an array.
+// TODO: Generate a proxy class for the closure when boxing the first time.
+using BoxedClosurePointerType = mirror::ByteArray*;
+
+static mirror::Class* GetBoxedClosureClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ return mirror::ByteArray::GetArrayClass();
+}
+
+namespace {
+ // Convenience functions for allocating/deleting box table copies of the closures.
+ struct ClosureAllocator {
+ // Deletes a Closure that was allocated through ::Allocate.
+ static void Delete(Closure* ptr) {
+ delete[] reinterpret_cast<char*>(ptr);
+ }
+
+ // Returns a well-aligned pointer to a newly allocated Closure on the 'new' heap.
+ static Closure* Allocate(size_t size) {
+ DCHECK_GE(size, sizeof(Closure));
+
+ // TODO: Maybe point to the interior of the boxed closure object after we add proxy support?
+ Closure* closure = reinterpret_cast<Closure*>(new char[size]);
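+ // Assumes the char[] storage returned by operator new[] is aligned at least as strictly as
+ // Closure; the DCHECK below verifies this.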
+ DCHECK_ALIGNED(closure, alignof(Closure));
+ return closure;
+ }
+ };
+} // namespace
BoxTable::BoxTable()
: allow_new_weaks_(true),
new_weaks_condition_("lambda box table allowed weaks", *Locks::lambda_table_lock_) {}
+BoxTable::~BoxTable() {
+ // Free all the copies of our closures.
+ for (auto map_iterator = map_.begin(); map_iterator != map_.end(); ++map_iterator) {
+ std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;
+
+ Closure* closure = key_value_pair.first;
+
+ // Remove from the map first, so that it doesn't try to access a dangling pointer.
+ map_iterator = map_.Erase(map_iterator);
+
+ // Safe to delete, no dangling pointers.
+ ClosureAllocator::Delete(closure);
+ }
+}
+
mirror::Object* BoxTable::BoxLambda(const ClosureType& closure) {
Thread* self = Thread::Current();
@@ -58,22 +102,29 @@
// Release the lambda table lock here, so that thread suspension is allowed.
- // Convert the ArtMethod into a java.lang.reflect.Method which will serve
+ // Convert the Closure into a managed byte[] which will serve
// as the temporary 'boxed' version of the lambda. This is good enough
// to check all the basic object identities that a boxed lambda must retain.
+ // It's also good enough to contain all the captured primitive variables.
// TODO: Boxing an innate lambda (i.e. made with create-lambda) should make a proxy class
// TODO: Boxing a learned lambda (i.e. made with unbox-lambda) should return the original object
- mirror::Method* method_as_object =
- mirror::Method::CreateFromArtMethod(self, closure);
+ BoxedClosurePointerType closure_as_array_object =
+ mirror::ByteArray::Alloc(self, closure->GetSize());
+
// There are no thread suspension points after this, so we don't need to put it into a handle.
- if (UNLIKELY(method_as_object == nullptr)) {
+ if (UNLIKELY(closure_as_array_object == nullptr)) {
// Most likely an OOM has occurred.
CHECK(self->IsExceptionPending());
return nullptr;
}
+ // Write the raw closure data into the byte[].
+ closure->CopyTo(closure_as_array_object->GetRawData(sizeof(uint8_t), // component size
+ 0 /*index*/),
+ closure_as_array_object->GetLength());
+
// The method has been successfully boxed into an object, now insert it into the hash map.
{
MutexLock mu(self, *Locks::lambda_table_lock_);
@@ -87,38 +138,56 @@
return value.Read();
}
- // Otherwise we should insert it into the hash map in this thread.
- map_.Insert(std::make_pair(closure, ValueType(method_as_object)));
+ // Otherwise we need to insert it into the hash map in this thread.
+
+ // Make a copy for the box table to keep, in case the closure gets collected from the stack.
+ // TODO: GC may need to sweep for roots in the box table's copy of the closure.
+ Closure* closure_table_copy = ClosureAllocator::Allocate(closure->GetSize());
+ closure->CopyTo(closure_table_copy, closure->GetSize());
+
+ // The closure_table_copy needs to be deleted by us manually when we erase it from the map.
+
+ // Actually insert into the table.
+ map_.Insert({closure_table_copy, ValueType(closure_as_array_object)});
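+ // The boxed object is only held through a GC root that is swept weakly (see the sweep loop
+ // below); once the box is unreachable the entry is erased and closure_table_copy is deleted.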
}
- return method_as_object;
+ return closure_as_array_object;
}
bool BoxTable::UnboxLambda(mirror::Object* object, ClosureType* out_closure) {
DCHECK(object != nullptr);
*out_closure = nullptr;
+ Thread* self = Thread::Current();
+
// Note that we do not need to access lambda_table_lock_ here
// since we don't need to look at the map.
mirror::Object* boxed_closure_object = object;
- // Raise ClassCastException if object is not instanceof java.lang.reflect.Method
- if (UNLIKELY(!boxed_closure_object->InstanceOf(mirror::Method::StaticClass()))) {
- ThrowClassCastException(mirror::Method::StaticClass(), boxed_closure_object->GetClass());
+ // Raise ClassCastException if object is not instanceof byte[]
+ if (UNLIKELY(!boxed_closure_object->InstanceOf(GetBoxedClosureClass()))) {
+ ThrowClassCastException(GetBoxedClosureClass(), boxed_closure_object->GetClass());
return false;
}
// TODO(iam): We must check that the closure object extends/implements the type
- // specified in [type id]. This is not currently implemented since it's always a Method.
+ // specified in [type id]. This is not currently implemented since it's always a byte[].
// If we got this far, the inputs are valid.
- // Write out the java.lang.reflect.Method's embedded ArtMethod* into the vreg target.
- mirror::AbstractMethod* boxed_closure_as_method =
- down_cast<mirror::AbstractMethod*>(boxed_closure_object);
+ // Shuffle the byte[] back into a raw closure: allocate a fresh Closure, copy the data in, and return it.
+ BoxedClosurePointerType boxed_closure_as_array =
+ down_cast<BoxedClosurePointerType>(boxed_closure_object);
- ArtMethod* unboxed_closure = boxed_closure_as_method->GetArtMethod();
- DCHECK(unboxed_closure != nullptr);
+ const int8_t* unaligned_interior_closure = boxed_closure_as_array->GetData();
+
+ // Allocate a copy that can "escape" this call, and copy the closure data into it.
+ Closure* unboxed_closure =
+ LeakingAllocator::MakeFlexibleInstance<Closure>(self, boxed_closure_as_array->GetLength());
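+ // Note: memory from LeakingAllocator is (as the name suggests) never freed here; the unboxed
+ // closure is assumed to remain valid for the rest of the caller's use of it.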
+ // TODO: don't just memcpy the closure, it's unsafe when we add references to the mix.
+ memcpy(unboxed_closure, unaligned_interior_closure, boxed_closure_as_array->GetLength());
+
+ DCHECK_EQ(unboxed_closure->GetSize(), static_cast<size_t>(boxed_closure_as_array->GetLength()));
*out_closure = unboxed_closure;
return true;
@@ -127,7 +196,7 @@
BoxTable::ValueType BoxTable::FindBoxedLambda(const ClosureType& closure) const {
auto map_iterator = map_.Find(closure);
if (map_iterator != map_.end()) {
- const std::pair<ClosureType, ValueType>& key_value_pair = *map_iterator;
+ const std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;
const ValueType& value = key_value_pair.second;
DCHECK(!value.IsNull()); // Never store null boxes.
@@ -157,7 +226,7 @@
*/
std::vector<ClosureType> remove_list;
for (auto map_iterator = map_.begin(); map_iterator != map_.end(); ) {
- std::pair<ClosureType, ValueType>& key_value_pair = *map_iterator;
+ std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;
const ValueType& old_value = key_value_pair.second;
@@ -166,10 +235,15 @@
mirror::Object* new_value = visitor->IsMarked(old_value_raw);
if (new_value == nullptr) {
- const ClosureType& closure = key_value_pair.first;
// The object has been swept away.
+ const ClosureType& closure = key_value_pair.first;
+
// Delete the entry from the map.
- map_iterator = map_.Erase(map_.Find(closure));
+ map_iterator = map_.Erase(map_iterator);
+
+ // Clean up the memory by deleting the closure.
+ ClosureAllocator::Delete(closure);
+
} else {
// The object has been moved.
// Update the map.
@@ -208,16 +282,33 @@
new_weaks_condition_.Broadcast(self);
}
-bool BoxTable::EqualsFn::operator()(const ClosureType& lhs, const ClosureType& rhs) const {
+void BoxTable::EmptyFn::MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& item) const {
+ item.first = nullptr;
+
+ Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+ item.second = ValueType(); // Also clear the GC root.
+}
+
+bool BoxTable::EmptyFn::IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const {
+ return item.first == nullptr;
+}
+
+bool BoxTable::EqualsFn::operator()(const UnorderedMapKeyType& lhs,
+ const UnorderedMapKeyType& rhs) const {
// Nothing needs this right now, but leave this assertion for later when
// we need to look at the references inside of the closure.
- if (kIsDebugBuild) {
- Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
- }
+ Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
- // TODO: Need rework to use read barriers once closures have references inside of them that can
- // move. Until then, it's safe to just compare the data inside of it directly.
- return lhs == rhs;
+ return lhs->ReferenceEquals(rhs);
+}
+
+size_t BoxTable::HashFn::operator()(const UnorderedMapKeyType& key) const {
+ const lambda::Closure* closure = key;
+ DCHECK_ALIGNED(closure, alignof(lambda::Closure));
+
+ // Need to hold mutator_lock_ before calling into Closure::GetHashCode.
+ Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+ return closure->GetHashCode();
}
} // namespace lambda