Revert "lambda: Add support for invoke-interface for boxed innate lambdas"
The 955-lambda run-test is flaky.
Bug: 24618608
Bug: 25107649
This reverts commit 457e874459ae638145cab6d572e34d48480e39d2.
Change-Id: I24884344d21d7a4262e53e3f5dba57032687ddb7
diff --git a/runtime/lambda/art_lambda_method.cc b/runtime/lambda/art_lambda_method.cc
index 0690cd1..6f9f8bb 100644
--- a/runtime/lambda/art_lambda_method.cc
+++ b/runtime/lambda/art_lambda_method.cc
@@ -14,7 +14,6 @@
* limitations under the License.
*/
-#include "art_method-inl.h"
#include "lambda/art_lambda_method.h"
#include "base/logging.h"
@@ -74,12 +73,5 @@
}
}
-size_t ArtLambdaMethod::GetArgumentVRegCount() const {
- DCHECK(GetArtMethod()->IsStatic()); // Instance methods don't have receiver in shorty.
- const char* method_shorty = GetArtMethod()->GetShorty();
- DCHECK_NE(*method_shorty, '\0') << method_shorty;
- return ShortyFieldType::CountVirtualRegistersRequired(method_shorty + 1); // skip return type
-}
-
} // namespace lambda
} // namespace art
diff --git a/runtime/lambda/art_lambda_method.h b/runtime/lambda/art_lambda_method.h
index a858bf9..ea13eb7 100644
--- a/runtime/lambda/art_lambda_method.h
+++ b/runtime/lambda/art_lambda_method.h
@@ -90,17 +90,6 @@
return strlen(captured_variables_shorty_);
}
- // Return the offset in bytes from the start of ArtLambdaMethod to the method_.
- // -- Should only be used by assembly (stubs) support code and compiled code.
- static constexpr size_t GetArtMethodOffset() {
- return offsetof(ArtLambdaMethod, method_);
- }
-
- // Calculate how many vregs all the arguments will use when doing an invoke.
- // (Most primitives are 1 vreg, double/long are 2, reference is 1, lambda is 2).
- // -- This is used to know how big a shadow frame to set up when invoking into the target method.
- size_t GetArgumentVRegCount() const SHARED_REQUIRES(Locks::mutator_lock_);
-
private:
// TODO: ArtMethod, or at least the entry points, should be inlined into this struct
// to avoid an extra indirect load when doing invokes.
diff --git a/runtime/lambda/box_class_table-inl.h b/runtime/lambda/box_class_table-inl.h
deleted file mode 100644
index 2fc34a7..0000000
--- a/runtime/lambda/box_class_table-inl.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_LAMBDA_BOX_CLASS_TABLE_INL_H_
-#define ART_RUNTIME_LAMBDA_BOX_CLASS_TABLE_INL_H_
-
-#include "lambda/box_class_table.h"
-#include "thread.h"
-
-namespace art {
-namespace lambda {
-
-template <typename Visitor>
-inline void BoxClassTable::VisitRoots(const Visitor& visitor) {
- MutexLock mu(Thread::Current(), *Locks::lambda_class_table_lock_);
- for (std::pair<UnorderedMapKeyType, ValueType>& key_value : map_) {
- ValueType& gc_root = key_value.second;
- visitor.VisitRoot(gc_root.AddressWithoutBarrier());
- }
-}
-
-} // namespace lambda
-} // namespace art
-
-#endif // ART_RUNTIME_LAMBDA_BOX_CLASS_TABLE_INL_H_
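For context, the deleted VisitRoots above hands the visitor the address of each cached class root so the GC can update pointers in place when objects move. A minimal generic sketch of that visiting pattern, with an illustrative Object stand-in rather than ART's GcRoot machinery:

    #include <string>
    #include <unordered_map>

    struct Object {};  // Stand-in for mirror::Class.

    // Pass the *address* of every root to the visitor, so it can mark the
    // object or rewrite the pointer after the object has been moved.
    template <typename Visitor>
    void VisitRoots(std::unordered_map<std::string, Object*>& map,
                    const Visitor& visitor) {
      for (auto& entry : map) {
        visitor(&entry.second);
      }
    }

    // Usage: VisitRoots(map, [](Object** root) { /* *root = new_location; */ });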
diff --git a/runtime/lambda/box_class_table.cc b/runtime/lambda/box_class_table.cc
deleted file mode 100644
index 1e49886..0000000
--- a/runtime/lambda/box_class_table.cc
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "lambda/box_class_table.h"
-
-#include "base/mutex.h"
-#include "common_throws.h"
-#include "gc_root-inl.h"
-#include "lambda/closure.h"
-#include "lambda/leaking_allocator.h"
-#include "mirror/method.h"
-#include "mirror/object-inl.h"
-#include "thread.h"
-
-#include <string>
-#include <vector>
-
-namespace art {
-namespace lambda {
-
-// Create the lambda proxy class given the name of the lambda interface (e.g. Ljava/lang/Runnable;)
-// Also needs a proper class loader (or null for bootclasspath) in which the proxy
-// will be created.
-//
-// The class must **not** have already been created.
-// Returns a non-null ptr on success, otherwise returns null and has an exception set.
-static mirror::Class* CreateClass(Thread* self,
- const std::string& class_name,
- const Handle<mirror::ClassLoader>& class_loader)
- SHARED_REQUIRES(Locks::mutator_lock_) {
- ScopedObjectAccessUnchecked soa(self);
- StackHandleScope<2> hs(self);
-
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-
- // Find the java.lang.Class for our class name (from the class loader).
- Handle<mirror::Class> lambda_interface =
- hs.NewHandle(class_linker->FindClass(self, class_name.c_str(), class_loader));
- // TODO: use LookupClass in a loop
- // TODO: DCHECK that this doesn't actually cause the class to be loaded,
- // since the create-lambda should've loaded it already
- DCHECK(lambda_interface.Get() != nullptr) << "CreateClass with class_name=" << class_name;
- DCHECK(lambda_interface->IsInterface()) << "CreateClass with class_name=" << class_name;
- jobject lambda_interface_class = soa.AddLocalReference<jobject>(lambda_interface.Get());
-
- // Look up java.lang.reflect.Proxy#getLambdaProxyClass method.
- Handle<mirror::Class> java_lang_reflect_proxy =
- hs.NewHandle(class_linker->FindSystemClass(soa.Self(), "Ljava/lang/reflect/Proxy;"));
- jclass java_lang_reflect_proxy_class =
- soa.AddLocalReference<jclass>(java_lang_reflect_proxy.Get());
- DCHECK(java_lang_reflect_proxy.Get() != nullptr);
-
- jmethodID proxy_factory_method_id =
- soa.Env()->GetStaticMethodID(java_lang_reflect_proxy_class,
- "getLambdaProxyClass",
- "(Ljava/lang/ClassLoader;Ljava/lang/Class;)Ljava/lang/Class;");
- DCHECK(!soa.Env()->ExceptionCheck());
-
- // Call into the java code to do the hard work of figuring out which methods and throws
- // our lambda interface proxy needs to implement. It then calls back into the class linker
- // on our behalf to make the proxy itself.
- jobject generated_lambda_proxy_class =
- soa.Env()->CallStaticObjectMethod(java_lang_reflect_proxy_class,
- proxy_factory_method_id,
- class_loader.ToJObject(),
- lambda_interface_class);
-
- // This can throw, in which case we return null. Caller must handle.
- return soa.Decode<mirror::Class*>(generated_lambda_proxy_class);
-}
-
-BoxClassTable::BoxClassTable() {
-}
-
-BoxClassTable::~BoxClassTable() {
- // Nothing to do here; classes are deleted automatically by the GC
- // when the class loader is deleted.
- //
- // Our table will not outlive the classloader since the classloader owns it.
-}
-
-mirror::Class* BoxClassTable::GetOrCreateBoxClass(const char* class_name,
- const Handle<mirror::ClassLoader>& class_loader) {
- DCHECK(class_name != nullptr);
-
- Thread* self = Thread::Current();
-
- std::string class_name_str = class_name;
-
- {
- MutexLock mu(self, *Locks::lambda_class_table_lock_);
-
- // Attempt to look up this class; it may have been created previously.
- // If this is the case we *must* return the same class as before to maintain
- // referential equality between box instances.
- //
- // In managed code:
- // Functional f = () -> 5; // vF = create-lambda
- // Object a = f; // vA = box-lambda vA
- // Object b = f; // vB = box-lambda vB
- // assert(a.getClass() == b.getClass())
- // assert(a == b)
- ValueType value = FindBoxedClass(class_name_str);
- if (!value.IsNull()) {
- return value.Read();
- }
- }
-
- // Otherwise we need to generate a class ourselves and insert it into the hash map
-
- // Release the table lock here, which implicitly allows other threads to suspend
- // (since the GC callbacks will not block on trying to acquire our lock).
- // We also don't want to call into the class linker with the lock held because
- // our lock level is lower.
- self->AllowThreadSuspension();
-
- // Create a lambda proxy class, within the specified class loader.
- mirror::Class* lambda_proxy_class = CreateClass(self, class_name_str, class_loader);
-
- // There are no thread suspension points after this, so we don't need to put it into a handle.
- ScopedAssertNoThreadSuspension soants{self, "BoxClassTable::GetOrCreateBoxClass"}; // NOLINT: [readability/braces] [4]
-
- if (UNLIKELY(lambda_proxy_class == nullptr)) {
- // Most likely an OOM has occurred.
- CHECK(self->IsExceptionPending());
- return nullptr;
- }
-
- {
- MutexLock mu(self, *Locks::lambda_class_table_lock_);
-
- // Possible, but unlikely, that someone already came in and made a proxy class
- // on another thread.
- ValueType value = FindBoxedClass(class_name_str);
- if (UNLIKELY(!value.IsNull())) {
- DCHECK_EQ(lambda_proxy_class, value.Read());
- return value.Read();
- }
-
- // Otherwise we made a brand new proxy class.
- // The class itself is cleaned up by the GC (e.g. class unloading) later.
-
- // Actually insert into the table.
- map_.Insert({std::move(class_name_str), ValueType(lambda_proxy_class)});
- }
-
- return lambda_proxy_class;
-}
-
-BoxClassTable::ValueType BoxClassTable::FindBoxedClass(const std::string& class_name) const {
- auto map_iterator = map_.Find(class_name);
- if (map_iterator != map_.end()) {
- const std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;
- const ValueType& value = key_value_pair.second;
-
- DCHECK(!value.IsNull()); // Never store null boxes.
- return value;
- }
-
- return ValueType(nullptr);
-}
-
-void BoxClassTable::EmptyFn::MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& item) const {
- item.first.clear();
-
- Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
- item.second = ValueType(); // Also clear the GC root.
-}
-
-bool BoxClassTable::EmptyFn::IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const {
- bool is_empty = item.first.empty();
- DCHECK_EQ(item.second.IsNull(), is_empty);
-
- return is_empty;
-}
-
-bool BoxClassTable::EqualsFn::operator()(const UnorderedMapKeyType& lhs,
- const UnorderedMapKeyType& rhs) const {
- // Be damn sure the classes don't just move around from under us.
- Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
-
- // Having the same class name isn't enough; the class loader must also match.
- // Within the same class loader, classes are equal by pointer identity.
- return lhs == rhs;
-}
-
-size_t BoxClassTable::HashFn::operator()(const UnorderedMapKeyType& key) const {
- return std::hash<std::string>()(key);
-}
-
-} // namespace lambda
-} // namespace art
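The deleted GetOrCreateBoxClass follows a check / create-unlocked / re-check shape: the table lock is dropped before calling into the class linker (the comments note the table lock's level is lower), and the lookup is repeated afterwards in case another thread won the race. A minimal sketch of that same shape with standard primitives; BoxClassCache and its members are illustrative stand-ins, not ART APIs:

    #include <mutex>
    #include <string>
    #include <unordered_map>

    struct Class {};  // Stand-in for mirror::Class.

    class BoxClassCache {
     public:
      Class* GetOrCreate(const std::string& name) {
        {
          std::lock_guard<std::mutex> lock(mu_);
          auto it = map_.find(name);
          if (it != map_.end()) {
            return it->second;  // Fast path: referential equality preserved.
          }
        }
        // Slow path: create outside the lock, since creation may block.
        Class* created = new Class();
        std::lock_guard<std::mutex> lock(mu_);
        auto it = map_.find(name);
        if (it != map_.end()) {
          delete created;       // Another thread won the race; use its class.
          return it->second;
        }
        map_.emplace(name, created);
        return created;
      }

     private:
      std::mutex mu_;
      std::unordered_map<std::string, Class*> map_;
    };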
diff --git a/runtime/lambda/box_class_table.h b/runtime/lambda/box_class_table.h
deleted file mode 100644
index 17e1026..0000000
--- a/runtime/lambda/box_class_table.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef ART_RUNTIME_LAMBDA_BOX_CLASS_TABLE_H_
-#define ART_RUNTIME_LAMBDA_BOX_CLASS_TABLE_H_
-
-#include "base/allocator.h"
-#include "base/hash_map.h"
-#include "gc_root.h"
-#include "base/macros.h"
-#include "base/mutex.h"
-#include "object_callbacks.h"
-
-#include <stdint.h>
-
-namespace art {
-
-class ArtMethod; // forward declaration
-template<class T> class Handle; // forward declaration
-
-namespace mirror {
-class Class; // forward declaration
-class ClassLoader; // forward declaration
-class LambdaProxy; // forward declaration
-class Object; // forward declaration
-} // namespace mirror
-
-namespace lambda {
-struct Closure; // forward declaration
-
-/*
- * Store a table of boxed lambda proxy classes. This is required to maintain class
- * referential equality when a lambda is re-boxed.
- *
- * Conceptually, we store a mapping of Class Name -> Weak Reference<Class>.
- * When too many objects get GCd, we shrink the underlying table to use less space.
- */
-class BoxClassTable FINAL {
- public:
- // TODO: This should take a LambdaArtMethod instead, read class name from that.
- // Note: null class_loader means bootclasspath.
- mirror::Class* GetOrCreateBoxClass(const char* class_name,
- const Handle<mirror::ClassLoader>& class_loader)
- REQUIRES(!Locks::lambda_class_table_lock_, !Roles::uninterruptible_)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Sweep strong references to lambda class boxes. Update the addresses if the objects
- // have been moved, and delete them from the table if the objects have been cleaned up.
- template <typename Visitor>
- void VisitRoots(const Visitor& visitor)
- NO_THREAD_SAFETY_ANALYSIS // for object marking requiring heap bitmap lock
- REQUIRES(!Locks::lambda_class_table_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
- BoxClassTable();
- ~BoxClassTable();
-
- private:
- // We only store strong GC roots in our table.
- using ValueType = GcRoot<mirror::Class>;
-
- // Attempt to look up the class in the map, or return null if it's not there yet.
- ValueType FindBoxedClass(const std::string& class_name) const
- SHARED_REQUIRES(Locks::lambda_class_table_lock_);
-
- // Store the key as a string so that we can have our own copy of the class name.
- using UnorderedMapKeyType = std::string;
-
- // EmptyFn implementation for art::HashMap
- struct EmptyFn {
- void MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& item) const
- NO_THREAD_SAFETY_ANALYSIS;
- // SHARED_REQUIRES(Locks::mutator_lock_);
-
- bool IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const;
- };
-
- // HashFn implementation for art::HashMap
- struct HashFn {
- size_t operator()(const UnorderedMapKeyType& key) const
- NO_THREAD_SAFETY_ANALYSIS;
- // SHARED_REQUIRES(Locks::mutator_lock_);
- };
-
- // EqualsFn implementation for art::HashMap
- struct EqualsFn {
- bool operator()(const UnorderedMapKeyType& lhs, const UnorderedMapKeyType& rhs) const
- NO_THREAD_SAFETY_ANALYSIS;
- // SHARED_REQUIRES(Locks::mutator_lock_);
- };
-
- using UnorderedMap = art::HashMap<UnorderedMapKeyType,
- ValueType,
- EmptyFn,
- HashFn,
- EqualsFn,
- TrackingAllocator<std::pair<UnorderedMapKeyType, ValueType>,
- kAllocatorTagLambdaProxyClassBoxTable>>;
-
- // Map of strong GC roots (lambda interface name -> lambda proxy class)
- UnorderedMap map_ GUARDED_BY(Locks::lambda_class_table_lock_);
-
- // Shrink the map when we get below this load factor.
- // (This is an arbitrary value that should be large enough to prevent aggressive map erases
- // from shrinking the table too often.)
- static constexpr double kMinimumLoadFactor = UnorderedMap::kDefaultMinLoadFactor / 2;
-
- DISALLOW_COPY_AND_ASSIGN(BoxClassTable);
-};
-
-} // namespace lambda
-} // namespace art
-
-#endif // ART_RUNTIME_LAMBDA_BOX_CLASS_TABLE_H_
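art::HashMap is open-addressed, which is why the deleted table supplies an EmptyFn to write and detect empty slots in addition to the usual hash and equality functors. A rough standard-library analogue of the deleted typedef needs only the latter two (std::unordered_map standing in for art::HashMap, Class for mirror::Class):

    #include <cstddef>
    #include <string>
    #include <unordered_map>

    struct Class {};  // Stand-in for mirror::Class.

    struct HashFn {
      std::size_t operator()(const std::string& key) const {
        return std::hash<std::string>()(key);  // Same hash as the deleted HashFn.
      }
    };

    struct EqualsFn {
      bool operator()(const std::string& lhs, const std::string& rhs) const {
        return lhs == rhs;  // Same name within one class loader => same class.
      }
    };

    // No EmptyFn: a chained map has no in-band "empty slot" sentinel to manage.
    using BoxClassMap = std::unordered_map<std::string, Class*, HashFn, EqualsFn>;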
diff --git a/runtime/lambda/box_table.cc b/runtime/lambda/box_table.cc
index 0032d08..9918bb7 100644
--- a/runtime/lambda/box_table.cc
+++ b/runtime/lambda/box_table.cc
@@ -18,10 +18,8 @@
#include "base/mutex.h"
#include "common_throws.h"
#include "gc_root-inl.h"
-#include "lambda/box_class_table.h"
#include "lambda/closure.h"
#include "lambda/leaking_allocator.h"
-#include "mirror/lambda_proxy.h"
#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "thread.h"
@@ -30,13 +28,12 @@
namespace art {
namespace lambda {
-// All closures are boxed into a subtype of LambdaProxy which implements the lambda's interface.
-using BoxedClosurePointerType = mirror::LambdaProxy*;
+// Temporarily represent the lambda Closure as its raw bytes in an array.
+// TODO: Generate a proxy class for the closure when boxing the first time.
+using BoxedClosurePointerType = mirror::ByteArray*;
-// Returns the base class for all boxed closures.
-// Note that concrete closure boxes are actually a subtype of mirror::LambdaProxy.
-static mirror::Class* GetBoxedClosureBaseClass() SHARED_REQUIRES(Locks::mutator_lock_) {
- return Runtime::Current()->GetClassLinker()->GetClassRoot(ClassLinker::kJavaLangLambdaProxy);
+static mirror::Class* GetBoxedClosureClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ return mirror::ByteArray::GetArrayClass();
}
namespace {
@@ -57,14 +54,6 @@
return closure;
}
};
-
- struct DeleterForClosure {
- void operator()(Closure* closure) const {
- ClosureAllocator::Delete(closure);
- }
- };
-
- using UniqueClosurePtr = std::unique_ptr<Closure, DeleterForClosure>;
} // namespace
BoxTable::BoxTable()
@@ -86,9 +75,7 @@
}
}
-mirror::Object* BoxTable::BoxLambda(const ClosureType& closure,
- const char* class_name,
- mirror::ClassLoader* class_loader) {
+mirror::Object* BoxTable::BoxLambda(const ClosureType& closure) {
Thread* self = Thread::Current();
{
@@ -104,7 +91,7 @@
// Functional f = () -> 5; // vF = create-lambda
// Object a = f; // vA = box-lambda vA
// Object b = f; // vB = box-lambda vB
- // assert(a == b)
+ // assert(a == f)
ValueType value = FindBoxedLambda(closure);
if (!value.IsNull()) {
return value.Read();
@@ -113,62 +100,30 @@
// Otherwise we need to box ourselves and insert it into the hash map
}
- // Convert the Closure into a managed object instance whose supertype is java.lang.LambdaProxy.
-
- // TODO: Boxing a learned lambda (i.e. made with unbox-lambda) should return the original object
- StackHandleScope<2> hs{self}; // NOLINT: [readability/braces] [4]
-
- Handle<mirror::ClassLoader> class_loader_handle = hs.NewHandle(class_loader);
-
// Release the lambda table lock here, so that thread suspension is allowed.
- self->AllowThreadSuspension();
- lambda::BoxClassTable* lambda_box_class_table;
+ // Convert the Closure into a managed byte[] which will serve
+ // as the temporary 'boxed' version of the lambda. This is good enough
+ // to check all the basic object identities that a boxed lambda must retain.
+ // It's also good enough to contain all the captured primitive variables.
- // Find the lambda box class table, which can be in the system class loader if classloader is null
- if (class_loader == nullptr) {
- ScopedObjectAccessUnchecked soa(self);
- mirror::ClassLoader* system_class_loader =
- soa.Decode<mirror::ClassLoader*>(Runtime::Current()->GetSystemClassLoader());
- lambda_box_class_table = system_class_loader->GetLambdaProxyCache();
- } else {
- lambda_box_class_table = class_loader_handle->GetLambdaProxyCache();
- // OK: can't be deleted while we hold a handle to the class loader.
- }
- DCHECK(lambda_box_class_table != nullptr);
+ // TODO: Boxing an innate lambda (i.e. made with create-lambda) should make a proxy class
+ // TODO: Boxing a learned lambda (i.e. made with unbox-lambda) should return the original object
+ BoxedClosurePointerType closure_as_array_object =
+ mirror::ByteArray::Alloc(self, closure->GetSize());
- Handle<mirror::Class> closure_class(hs.NewHandle(
- lambda_box_class_table->GetOrCreateBoxClass(class_name, class_loader_handle)));
- if (UNLIKELY(closure_class.Get() == nullptr)) {
+ // There are no thread suspension points after this, so we don't need to put it into a handle.
+
+ if (UNLIKELY(closure_as_array_object == nullptr)) {
// Most likely an OOM has occurred.
- self->AssertPendingException();
+ CHECK(self->IsExceptionPending());
return nullptr;
}
- BoxedClosurePointerType closure_as_object = nullptr;
- UniqueClosurePtr closure_table_copy;
- // Create an instance of the class, and assign the pointer to the closure into it.
- {
- closure_as_object = down_cast<BoxedClosurePointerType>(closure_class->AllocObject(self));
- if (UNLIKELY(closure_as_object == nullptr)) {
- self->AssertPendingOOMException();
- return nullptr;
- }
-
- // Make a copy of the closure that we will store in the hash map.
- // The proxy instance will also point to this same hash map.
- // Note that the closure pointer is cleaned up only after the proxy is GCd.
- closure_table_copy.reset(ClosureAllocator::Allocate(closure->GetSize()));
- closure_as_object->SetClosure(closure_table_copy.get());
- }
-
- // There are no thread suspension points after this, so we don't need to put it into a handle.
- ScopedAssertNoThreadSuspension soants{self, // NOLINT: [whitespace/braces] [5]
- "box lambda table - box lambda - no more suspensions"}; // NOLINT: [whitespace/braces] [5]
-
- // Write the raw closure data into the proxy instance's copy of the closure.
- closure->CopyTo(closure_table_copy.get(),
- closure->GetSize());
+ // Write the raw closure data into the byte[].
+ closure->CopyTo(closure_as_array_object->GetRawData(sizeof(uint8_t), // component size
+ 0 /*index*/), // index
+ closure_as_array_object->GetLength());
// The method has been successfully boxed into an object, now insert it into the hash map.
{
@@ -179,21 +134,24 @@
// we were allocating the object before.
ValueType value = FindBoxedLambda(closure);
if (UNLIKELY(!value.IsNull())) {
- // Let the GC clean up closure_as_object at a later time.
- // (We will not see this object when sweeping, it wasn't inserted yet.)
- closure_as_object->SetClosure(nullptr);
+ // Let the GC clean up closure_as_array_object at a later time.
return value.Read();
}
// Otherwise we need to insert it into the hash map in this thread.
- // The closure_table_copy is deleted by us manually when we erase it from the map.
+ // Make a copy for the box table to keep, in case the closure gets collected from the stack.
+ // TODO: GC may need to sweep for roots in the box table's copy of the closure.
+ Closure* closure_table_copy = ClosureAllocator::Allocate(closure->GetSize());
+ closure->CopyTo(closure_table_copy, closure->GetSize());
+
+ // The closure_table_copy needs to be deleted by us manually when we erase it from the map.
// Actually insert into the table.
- map_.Insert({closure_table_copy.release(), ValueType(closure_as_object)});
+ map_.Insert({closure_table_copy, ValueType(closure_as_array_object)});
}
- return closure_as_object;
+ return closure_as_array_object;
}
bool BoxTable::UnboxLambda(mirror::Object* object, ClosureType* out_closure) {
@@ -207,35 +165,29 @@
mirror::Object* boxed_closure_object = object;
- // Raise ClassCastException if object is not instanceof LambdaProxy
- if (UNLIKELY(!boxed_closure_object->InstanceOf(GetBoxedClosureBaseClass()))) {
- ThrowClassCastException(GetBoxedClosureBaseClass(), boxed_closure_object->GetClass());
+ // Raise ClassCastException if object is not instanceof byte[]
+ if (UNLIKELY(!boxed_closure_object->InstanceOf(GetBoxedClosureClass()))) {
+ ThrowClassCastException(GetBoxedClosureClass(), boxed_closure_object->GetClass());
return false;
}
// TODO(iam): We must check that the closure object extends/implements the type
- // specified in [type id]. This is not currently implemented since the type id is unavailable.
+ // specified in [type id]. This is not currently implemented since it's always a byte[].
// If we got this far, the inputs are valid.
- // Shuffle the java.lang.LambdaProxy back into a raw closure, then allocate it, copy,
- // and return it.
- BoxedClosurePointerType boxed_closure =
+ // Shuffle the byte[] back into a raw closure, then allocate it, copy, and return it.
+ BoxedClosurePointerType boxed_closure_as_array =
down_cast<BoxedClosurePointerType>(boxed_closure_object);
- DCHECK_ALIGNED(boxed_closure->GetClosure(), alignof(Closure));
- const Closure* aligned_interior_closure = boxed_closure->GetClosure();
- DCHECK(aligned_interior_closure != nullptr);
-
- // TODO: we probably don't need to make a copy here later on, once there's GC support.
+ const int8_t* unaligned_interior_closure = boxed_closure_as_array->GetData();
// Allocate a copy that can "escape" and copy the closure data into that.
Closure* unboxed_closure =
- LeakingAllocator::MakeFlexibleInstance<Closure>(self, aligned_interior_closure->GetSize());
- DCHECK_ALIGNED(unboxed_closure, alignof(Closure));
+ LeakingAllocator::MakeFlexibleInstance<Closure>(self, boxed_closure_as_array->GetLength());
// TODO: don't just memcpy the closure, it's unsafe when we add references to the mix.
- memcpy(unboxed_closure, aligned_interior_closure, aligned_interior_closure->GetSize());
+ memcpy(unboxed_closure, unaligned_interior_closure, boxed_closure_as_array->GetLength());
- DCHECK_EQ(unboxed_closure->GetSize(), aligned_interior_closure->GetSize());
+ DCHECK_EQ(unboxed_closure->GetSize(), static_cast<size_t>(boxed_closure_as_array->GetLength()));
*out_closure = unboxed_closure;
return true;
@@ -284,10 +236,9 @@
if (new_value == nullptr) {
// The object has been swept away.
- Closure* closure = key_value_pair.first;
+ const ClosureType& closure = key_value_pair.first;
// Delete the entry from the map.
- // (Remove from map first to avoid accessing dangling pointer).
map_iterator = map_.Erase(map_iterator);
// Clean up the memory by deleting the closure.
@@ -339,10 +290,7 @@
}
bool BoxTable::EmptyFn::IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const {
- bool is_empty = item.first == nullptr;
- DCHECK_EQ(item.second.IsNull(), is_empty);
-
- return is_empty;
+ return item.first == nullptr;
}
bool BoxTable::EqualsFn::operator()(const UnorderedMapKeyType& lhs,
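With this revert, boxing degenerates to copying the closure's raw bytes into a byte[] and unboxing to copying them back out into leaked storage. A compilable sketch of that round-trip; std::vector<uint8_t> stands in for mirror::ByteArray, the Closure here is simplified, and the flexible array member is a GNU/Clang extension like the [0] arrays in the real struct. As the TODOs note, a raw memcpy is only sound while closures cannot capture references:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <new>
    #include <vector>

    // Simplified stand-in: GetSize() covers the whole object, header included,
    // just as in the real Closure.
    struct Closure {
      uint32_t size;
      uint8_t payload[];  // Packed captured variables.
      size_t GetSize() const { return size; }
    };

    // Box: the "boxed" form is just the closure's bytes inside an array object.
    std::vector<uint8_t> BoxLambda(const Closure& closure) {
      std::vector<uint8_t> boxed(closure.GetSize());
      std::memcpy(boxed.data(), &closure, closure.GetSize());
      return boxed;
    }

    // Unbox: copy the bytes back out into fresh closure storage.
    Closure* UnboxLambda(const std::vector<uint8_t>& boxed) {
      Closure* unboxed = static_cast<Closure*>(::operator new(boxed.size()));
      std::memcpy(unboxed, boxed.data(), boxed.size());
      return unboxed;  // Deliberately leaked, like LeakingAllocator's result.
    }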
diff --git a/runtime/lambda/box_table.h b/runtime/lambda/box_table.h
index 9dca6ab..adb7332 100644
--- a/runtime/lambda/box_table.h
+++ b/runtime/lambda/box_table.h
@@ -30,9 +30,6 @@
class ArtMethod; // forward declaration
namespace mirror {
-class Class; // forward declaration
-class ClassLoader; // forward declaration
-class LambdaProxy; // forward declaration
class Object; // forward declaration
} // namespace mirror
@@ -51,11 +48,8 @@
using ClosureType = art::lambda::Closure*;
// Boxes a closure into an object. Returns null and throws an exception on failure.
- mirror::Object* BoxLambda(const ClosureType& closure,
- const char* class_name,
- mirror::ClassLoader* class_loader)
- REQUIRES(!Locks::lambda_table_lock_, !Roles::uninterruptible_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Object* BoxLambda(const ClosureType& closure)
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::lambda_table_lock_);
// Unboxes an object back into the lambda. Returns false and throws an exception on failure.
bool UnboxLambda(mirror::Object* object, ClosureType* out_closure)
@@ -134,16 +128,7 @@
TrackingAllocator<std::pair<ClosureType, ValueType>,
kAllocatorTagLambdaBoxTable>>;
- using ClassMap = art::HashMap<std::string,
- GcRoot<mirror::Class>,
- EmptyFn,
- HashFn,
- EqualsFn,
- TrackingAllocator<std::pair<ClosureType, ValueType>,
- kAllocatorTagLambdaProxyClassBoxTable>>;
-
UnorderedMap map_ GUARDED_BY(Locks::lambda_table_lock_);
- UnorderedMap classes_map_ GUARDED_BY(Locks::lambda_table_lock_);
bool allow_new_weaks_ GUARDED_BY(Locks::lambda_table_lock_);
ConditionVariable new_weaks_condition_ GUARDED_BY(Locks::lambda_table_lock_);
diff --git a/runtime/lambda/closure.cc b/runtime/lambda/closure.cc
index f935e04..179e4ee 100644
--- a/runtime/lambda/closure.cc
+++ b/runtime/lambda/closure.cc
@@ -20,6 +20,9 @@
#include "lambda/art_lambda_method.h"
#include "runtime/mirror/object_reference.h"
+static constexpr const bool kClosureSupportsReferences = false;
+static constexpr const bool kClosureSupportsGarbageCollection = false;
+
namespace art {
namespace lambda {
@@ -125,10 +128,6 @@
return const_cast<ArtMethod*>(lambda_info_->GetArtMethod());
}
-ArtLambdaMethod* Closure::GetLambdaInfo() const {
- return const_cast<ArtLambdaMethod*>(lambda_info_);
-}
-
uint32_t Closure::GetHashCode() const {
// Start with a non-zero constant, a prime number.
uint32_t result = 17;
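The surrounding GetHashCode (unchanged by this diff) seeds with the prime 17 and then folds each component in with a second prime, the classic multiply-accumulate hash. A minimal sketch of that pattern over raw bytes; the real method iterates the captured variables, so this is illustrative only:

    #include <cstddef>
    #include <cstdint>

    uint32_t HashBytes(const uint8_t* data, size_t length) {
      uint32_t result = 17;              // Non-zero prime seed.
      for (size_t i = 0; i < length; ++i) {
        result = result * 31 + data[i];  // Fold each byte in; 31 is a cheap prime.
      }
      return result;
    }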
diff --git a/runtime/lambda/closure.h b/runtime/lambda/closure.h
index 38ec063..31ff194 100644
--- a/runtime/lambda/closure.h
+++ b/runtime/lambda/closure.h
@@ -33,52 +33,12 @@
class ArtLambdaMethod; // forward declaration
class ClosureBuilder; // forward declaration
-// TODO: Remove these constants once closures are supported properly.
-
-// Does the lambda closure support containing references? If so, all the users of lambdas
-// must be updated to also support references.
-static constexpr const bool kClosureSupportsReferences = false;
-// Does the lambda closure support being garbage collected? If so, all the users of lambdas
-// must be updated to also support garbage collection.
-static constexpr const bool kClosureSupportsGarbageCollection = false;
-// Does the lambda closure support being garbage collected with a read barrier? If so,
-// all the users of the lambdas must also be updated to support read barrier GC.
-static constexpr const bool kClosureSupportsReadBarrier = false;
-
-// Is this closure being stored as a 'long' in shadow frames and the quick ABI?
-static constexpr const bool kClosureIsStoredAsLong = true;
-
-
-// Raw memory layout for the lambda closure.
-//
-// WARNING:
-// * This should only be used by the compiler and tests, as they need to offsetof the raw fields.
-// * Runtime/interpreter should always access closures through a Closure pointer.
-struct ClosureStorage {
- // Compile-time known lambda information such as the type descriptor and size.
- ArtLambdaMethod* lambda_info_;
-
- // A contiguous list of captured variables, and possibly the closure size.
- // The runtime size can always be determined through GetSize().
- union {
- // Read from here if the closure size is static (ArtLambdaMethod::IsStatic)
- uint8_t static_variables_[0];
- struct {
- // Read from here if the closure size is dynamic (ArtLambdaMethod::IsDynamic)
- size_t size_; // The lambda_info_ and the size_ itself is also included as part of the size.
- uint8_t variables_[0];
- } dynamic_;
- } captured_[0];
- // captured_ will always consist of one array element at runtime.
- // Set to [0] so that 'size_' is not counted in sizeof(Closure).
-};
-
// Inline representation of a lambda closure.
// Contains the target method and the set of packed captured variables as a copy.
//
// The closure itself is logically immutable, although in practice any object references
// it (recursively) contains can be moved and updated by the GC.
-struct Closure : private ClosureStorage {
+struct PACKED(sizeof(ArtLambdaMethod*)) Closure {
// Get the size of the Closure in bytes.
// This is necessary in order to allocate a large enough area to copy the Closure into.
// Do *not* copy the closure with memcpy, since references also need to get moved.
@@ -92,9 +52,6 @@
// Get the target method, i.e. the method that will be dispatched into with invoke-lambda.
ArtMethod* GetTargetMethod() const;
- // Get the static lambda info that never changes.
- ArtLambdaMethod* GetLambdaInfo() const;
-
// Calculates the hash code. Value is recomputed each time.
uint32_t GetHashCode() const SHARED_REQUIRES(Locks::mutator_lock_);
@@ -199,15 +156,28 @@
static size_t GetClosureSize(const uint8_t* closure);
///////////////////////////////////////////////////////////////////////////////////
- // NOTE: Actual fields are declared in ClosureStorage.
+
+ // Compile-time known lambda information such as the type descriptor and size.
+ ArtLambdaMethod* lambda_info_;
+
+ // A contiguous list of captured variables, and possibly the closure size.
+ // The runtime size can always be determined through GetSize().
+ union {
+ // Read from here if the closure size is static (ArtLambdaMethod::IsStatic)
+ uint8_t static_variables_[0];
+ struct {
+ // Read from here if the closure size is dynamic (ArtLambdaMethod::IsDynamic)
+ size_t size_; // The lambda_info_ and the size_ itself is also included as part of the size.
+ uint8_t variables_[0];
+ } dynamic_;
+ } captured_[0];
+ // captured_ will always consist of one array element at runtime.
+ // Set to [0] so that 'size_' is not counted in sizeof(Closure).
+
+ friend class ClosureBuilder;
friend class ClosureTest;
};
-// ABI guarantees:
-// * Closure same size as a ClosureStorage
-// * ClosureStorage begins at the same point a Closure would begin.
-static_assert(sizeof(Closure) == sizeof(ClosureStorage), "Closure size must match ClosureStorage");
-
} // namespace lambda
} // namespace art
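The revert folds the raw layout back into Closure itself: one header pointer, then a zero-length-array union, so sizeof(Closure) counts only the header and the captured variables (with an optional leading size_ word for dynamically sized closures) are appended at allocation time. A compilable sketch of that layout trick; type names are illustrative, and zero-length arrays are a GNU/Clang extension exactly as in the original:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct Info {};  // Stand-in for ArtLambdaMethod.

    struct Box {
      Info* info_;
      union {
        uint8_t static_variables_[0];  // Payload when the size is known statically.
        struct {
          size_t size_;                // Runtime size word for dynamic closures.
          uint8_t variables_[0];
        } dynamic_;
      } captured_[0];                  // [0]: excluded from sizeof(Box).
    };

    int main() {
      // Only the header counts toward the size; both payload forms start after it.
      static_assert(sizeof(Box) == sizeof(Info*), "payload must not add to size");
      std::printf("static payload at %zu\n", offsetof(Box, captured_[0].static_variables_));
      std::printf("dynamic payload at %zu\n", offsetof(Box, captured_[0].dynamic_.variables_));
    }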
diff --git a/runtime/lambda/closure_builder.cc b/runtime/lambda/closure_builder.cc
index 7b36042..739e965 100644
--- a/runtime/lambda/closure_builder.cc
+++ b/runtime/lambda/closure_builder.cc
@@ -75,7 +75,7 @@
if (LIKELY(is_dynamic_size_ == false)) {
// Write in the extra bytes to store the dynamic size the first time.
is_dynamic_size_ = true;
- size_ += sizeof(ClosureStorage::captured_[0].dynamic_.size_);
+ size_ += sizeof(Closure::captured_[0].dynamic_.size_);
}
// A closure may be sized dynamically, so always query it for the true size.
@@ -107,40 +107,38 @@
<< "number of variables captured at runtime does not match "
<< "number of variables captured at compile time";
- ClosureStorage* closure_storage = new (memory) ClosureStorage;
- closure_storage->lambda_info_ = target_method;
+ Closure* closure = new (memory) Closure;
+ closure->lambda_info_ = target_method;
- static_assert(offsetof(ClosureStorage, captured_) == kInitialSize, "wrong initial size");
+ static_assert(offsetof(Closure, captured_) == kInitialSize, "wrong initial size");
size_t written_size;
if (UNLIKELY(is_dynamic_size_)) {
// The closure size must be set dynamically (i.e. nested lambdas).
- closure_storage->captured_[0].dynamic_.size_ = GetSize();
- size_t header_size = offsetof(ClosureStorage, captured_[0].dynamic_.variables_);
+ closure->captured_[0].dynamic_.size_ = GetSize();
+ size_t header_size = offsetof(Closure, captured_[0].dynamic_.variables_);
DCHECK_LE(header_size, GetSize());
size_t variables_size = GetSize() - header_size;
written_size =
WriteValues(target_method,
- closure_storage->captured_[0].dynamic_.variables_,
+ closure->captured_[0].dynamic_.variables_,
header_size,
variables_size);
} else {
// The closure size is known statically (i.e. no nested lambdas).
DCHECK(GetSize() == target_method->GetStaticClosureSize());
- size_t header_size = offsetof(ClosureStorage, captured_[0].static_variables_);
+ size_t header_size = offsetof(Closure, captured_[0].static_variables_);
DCHECK_LE(header_size, GetSize());
size_t variables_size = GetSize() - header_size;
written_size =
WriteValues(target_method,
- closure_storage->captured_[0].static_variables_,
+ closure->captured_[0].static_variables_,
header_size,
variables_size);
}
- // OK: The closure storage is guaranteed to be the same as a closure.
- Closure* closure = reinterpret_cast<Closure*>(closure_storage);
-
DCHECK_EQ(written_size, closure->GetSize());
+
return closure;
}
diff --git a/runtime/lambda/shorty_field_type.h b/runtime/lambda/shorty_field_type.h
index 54bb4d4..46ddaa9 100644
--- a/runtime/lambda/shorty_field_type.h
+++ b/runtime/lambda/shorty_field_type.h
@@ -285,39 +285,6 @@
}
}
- // Get the number of virtual registers necessary to represent this type as a stack local.
- inline size_t GetVirtualRegisterCount() const {
- if (IsPrimitiveNarrow()) {
- return 1;
- } else if (IsPrimitiveWide()) {
- return 2;
- } else if (IsObject()) {
- return kObjectReferenceSize / sizeof(uint32_t);
- } else if (IsLambda()) {
- return 2;
- } else {
- DCHECK(false) << "unknown shorty field type '" << static_cast<char>(value_) << "'";
- UNREACHABLE();
- }
- }
-
- // Count how many virtual registers would be necessary in order to store this list of shorty
- // field types.
- inline size_t static CountVirtualRegistersRequired(const char* shorty) {
- size_t size = 0;
-
- while (shorty != nullptr && *shorty != '\0') {
- // Each argument appends to the size.
- ShortyFieldType shorty_field{*shorty}; // NOLINT [readability/braces] [4]
-
- size += shorty_field.GetVirtualRegisterCount();
-
- ++shorty;
- }
-
- return size;
- }
-
// Implicitly convert to the anonymous nested inner type. Used for exhaustive switch detection.
inline operator decltype(kByte)() const {
return value_;
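The removed GetVirtualRegisterCount / CountVirtualRegistersRequired pair encodes the Dalvik calling-convention rule: narrow primitives and object references occupy one virtual register, while wide primitives (J/D) and lambdas ('\\' in this shorty scheme) occupy two. A standalone sketch of that counting rule:

    #include <cstddef>

    size_t CountVRegs(const char* shorty) {
      size_t vregs = 0;
      for (; shorty != nullptr && *shorty != '\0'; ++shorty) {
        switch (*shorty) {
          case 'J': case 'D': case '\\':
            vregs += 2;  // Wide primitives and lambdas take two vregs.
            break;
          default:       // Z, B, C, S, I, F, L all take one.
            vregs += 1;
            break;
        }
      }
      return vregs;
    }

    // e.g. CountVRegs("ZBCSIFJD") == 10 and CountVRegs("LLL\\L") == 6,
    // matching the expectations in the deleted shorty_field_type_test.cc below.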
diff --git a/runtime/lambda/shorty_field_type_test.cc b/runtime/lambda/shorty_field_type_test.cc
index 430e39e..32bade9 100644
--- a/runtime/lambda/shorty_field_type_test.cc
+++ b/runtime/lambda/shorty_field_type_test.cc
@@ -218,56 +218,6 @@
}
} // TEST_F
-TEST_F(ShortyFieldTypeTest, TestCalculateVRegSize) {
- // Make sure the single calculation for each value is correct.
- std::pair<size_t, char> expected_actual_single[] = {
- // Primitives
- { 1u, 'Z' },
- { 1u, 'B' },
- { 1u, 'C' },
- { 1u, 'S' },
- { 1u, 'I' },
- { 1u, 'F' },
- { 2u, 'J' },
- { 2u, 'D' },
- // Non-primitives
- { 1u, 'L' },
- { 2u, '\\' },
- };
-
- for (auto pair : expected_actual_single) {
- SCOPED_TRACE(pair.second);
- EXPECT_EQ(pair.first, ShortyFieldType(pair.second).GetVirtualRegisterCount());
- }
-
- // Make sure we are correctly calculating how many virtual registers a shorty descriptor takes.
- std::pair<size_t, const char*> expected_actual[] = {
- // Empty list
- { 0u, "" },
- // Primitives
- { 1u, "Z" },
- { 1u, "B" },
- { 1u, "C" },
- { 1u, "S" },
- { 1u, "I" },
- { 1u, "F" },
- { 2u, "J" },
- { 2u, "D" },
- // Non-primitives
- { 1u, "L" },
- { 2u, "\\" },
- // Multiple things at once:
- { 10u, "ZBCSIFJD" },
- { 5u, "LLSSI" },
- { 6u, "LLL\\L" }
- };
-
- for (auto pair : expected_actual) {
- SCOPED_TRACE(pair.second);
- EXPECT_EQ(pair.first, ShortyFieldType::CountVirtualRegistersRequired(pair.second));
- }
-} // TEST_F
-
// Helper class to probe a shorty's characteristics by minimizing copy-and-paste tests.
template <typename T, decltype(ShortyFieldType::kByte) kShortyEnum>
struct ShortyTypeCharacteristics {