Revert "lambda: Experimental support for capture-variable and liberate-variable"
A test fails with this change.
This reverts commit b72123440d8541362ebdb131436f9dbdda5fd329.
Change-Id: Ic9ed92f8c826d8465eb36b746dc44af05caf041c
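For orientation: the net effect of this revert is that BoxTable goes back to keying lambdas by their target ArtMethod* instead of by a raw lambda::Closure*, and to boxing them as java.lang.reflect.Method objects instead of byte[] blobs. A minimal sketch of the key-type change (the wrapper namespaces are illustrative only; both using-declarations appear verbatim in the box_table.h hunk below):

    namespace art { class ArtMethod; namespace lambda { struct Closure; } }

    // Being removed by this revert: closures as flexibly sized raw-data keys.
    namespace with_closures { using ClosureType = art::lambda::Closure*; }

    // Being restored by this revert: a lambda is identified by its target method.
    namespace reverted { using ClosureType = art::ArtMethod*; }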
diff --git a/runtime/lambda/art_lambda_method.h b/runtime/lambda/art_lambda_method.h
index ea13eb7..892d8c6 100644
--- a/runtime/lambda/art_lambda_method.h
+++ b/runtime/lambda/art_lambda_method.h
@@ -35,7 +35,7 @@
// (Ownership of strings is retained by the caller and their lifetime must exceed that of this class).
ArtLambdaMethod(ArtMethod* target_method,
const char* captured_variables_type_descriptor,
- const char* captured_variables_shorty,
+ const char* captured_variables_shorty_,
bool innate_lambda = true);
// Get the target method for this lambda that would be used by the invoke-lambda dex instruction.
diff --git a/runtime/lambda/box_table.cc b/runtime/lambda/box_table.cc
index 8eef10b..26575fd 100644
--- a/runtime/lambda/box_table.cc
+++ b/runtime/lambda/box_table.cc
@@ -18,8 +18,6 @@
#include "base/mutex.h"
#include "common_throws.h"
#include "gc_root-inl.h"
-#include "lambda/closure.h"
-#include "lambda/leaking_allocator.h"
#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "thread.h"
@@ -28,53 +26,11 @@
namespace art {
namespace lambda {
-// Temporarily represent the lambda Closure as its raw bytes in an array.
-// TODO: Generate a proxy class for the closure when boxing the first time.
-using BoxedClosurePointerType = mirror::ByteArray*;
-
-static mirror::Class* GetBoxedClosureClass() SHARED_REQUIRES(Locks::mutator_lock_) {
- return mirror::ByteArray::GetArrayClass();
-}
-
-namespace {
- // Convenience functions for allocating/deleting box table copies of the closures.
- struct ClosureAllocator {
- // Deletes a Closure that was allocated through ::Allocate.
- static void Delete(Closure* ptr) {
- delete[] reinterpret_cast<char*>(ptr);
- }
-
- // Returns a well-aligned pointer to a newly allocated Closure on the 'new' heap.
- static Closure* Allocate(size_t size) {
- DCHECK_GE(size, sizeof(Closure));
-
- // TODO: Maybe point to the interior of the boxed closure object after we add proxy support?
- Closure* closure = reinterpret_cast<Closure*>(new char[size]);
- DCHECK_ALIGNED(closure, alignof(Closure));
- return closure;
- }
- };
-} // namespace
BoxTable::BoxTable()
: allow_new_weaks_(true),
new_weaks_condition_("lambda box table allowed weaks", *Locks::lambda_table_lock_) {}
-BoxTable::~BoxTable() {
- // Free all the copies of our closures.
- for (auto map_iterator = map_.begin(); map_iterator != map_.end(); ++map_iterator) {
- std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;
-
- Closure* closure = key_value_pair.first;
-
- // Remove from the map first, so that it doesn't try to access a dangling pointer.
- map_iterator = map_.Erase(map_iterator);
-
- // Safe to delete, no dangling pointers.
- ClosureAllocator::Delete(closure);
- }
-}
-
mirror::Object* BoxTable::BoxLambda(const ClosureType& closure) {
Thread* self = Thread::Current();
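The hunk above deletes ClosureAllocator, which backed the box table's private copies of each closure. A standalone sketch of that allocation pattern, with a toy Blob standing in for lambda::Closure (it mirrors the removed reinterpret_cast approach rather than using placement new, exactly as the deleted code did):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    struct Blob { std::size_t size; };  // stand-in for lambda::Closure

    // Allocate a flexibly sized object as raw chars, then treat it as a Blob;
    // delete through the original char* so allocation and deallocation match.
    static Blob* Allocate(std::size_t size) {
      assert(size >= sizeof(Blob));
      Blob* blob = reinterpret_cast<Blob*>(new char[size]);
      assert(reinterpret_cast<std::uintptr_t>(blob) % alignof(Blob) == 0);
      return blob;
    }

    static void Delete(Blob* ptr) {
      delete[] reinterpret_cast<char*>(ptr);
    }

    int main() {
      Blob* blob = Allocate(sizeof(Blob) + 16);  // 16 extra payload bytes
      blob->size = sizeof(Blob) + 16;
      Delete(blob);
      return 0;
    }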
@@ -102,29 +58,22 @@
// Release the lambda table lock here, so that thread suspension is allowed.
- // Convert the Closure into a managed byte[] which will serve
+ // Convert the ArtMethod into a java.lang.reflect.Method which will serve
// as the temporary 'boxed' version of the lambda. This is good enough
// to check all the basic object identities that a boxed lambda must retain.
- // It's also good enough to contain all the captured primitive variables.
// TODO: Boxing an innate lambda (i.e. made with create-lambda) should make a proxy class
// TODO: Boxing a learned lambda (i.e. made with unbox-lambda) should return the original object
- BoxedClosurePointerType closure_as_array_object =
- mirror::ByteArray::Alloc(self, closure->GetSize());
-
+ mirror::Method* method_as_object =
+ mirror::Method::CreateFromArtMethod(self, closure);
// There are no thread suspension points after this, so we don't need to put it into a handle.
- if (UNLIKELY(closure_as_array_object == nullptr)) {
+ if (UNLIKELY(method_as_object == nullptr)) {
// Most likely an OOM has occurred.
CHECK(self->IsExceptionPending());
return nullptr;
}
- // Write the raw closure data into the byte[].
- closure->CopyTo(closure_as_array_object->GetRawData(sizeof(uint8_t), // component size
- 0 /*index*/), // index
- closure_as_array_object->GetLength());
-
// The method has been successfully boxed into an object, now insert it into the hash map.
{
MutexLock mu(self, *Locks::lambda_table_lock_);
@@ -138,56 +87,38 @@
return value.Read();
}
- // Otherwise we need to insert it into the hash map in this thread.
-
- // Make a copy for the box table to keep, in case the closure gets collected from the stack.
- // TODO: GC may need to sweep for roots in the box table's copy of the closure.
- Closure* closure_table_copy = ClosureAllocator::Allocate(closure->GetSize());
- closure->CopyTo(closure_table_copy, closure->GetSize());
-
- // The closure_table_copy must be deleted manually when we erase it from the map.
-
- // Actually insert into the table.
- map_.Insert({closure_table_copy, ValueType(closure_as_array_object)});
+ // Otherwise we should insert it into the hash map in this thread.
+ map_.Insert(std::make_pair(closure, ValueType(method_as_object)));
}
- return closure_as_array_object;
+ return method_as_object;
}
bool BoxTable::UnboxLambda(mirror::Object* object, ClosureType* out_closure) {
DCHECK(object != nullptr);
*out_closure = nullptr;
- Thread* self = Thread::Current();
-
// Note that we do not need to access lambda_table_lock_ here
// since we don't need to look at the map.
mirror::Object* boxed_closure_object = object;
- // Raise ClassCastException if object is not instanceof byte[]
- if (UNLIKELY(!boxed_closure_object->InstanceOf(GetBoxedClosureClass()))) {
- ThrowClassCastException(GetBoxedClosureClass(), boxed_closure_object->GetClass());
+ // Raise ClassCastException if object is not instanceof java.lang.reflect.Method
+ if (UNLIKELY(!boxed_closure_object->InstanceOf(mirror::Method::StaticClass()))) {
+ ThrowClassCastException(mirror::Method::StaticClass(), boxed_closure_object->GetClass());
return false;
}
// TODO(iam): We must check that the closure object extends/implements the type
- // specified in [type id]. This is not currently implemented since it's always a byte[].
+ // specified in [type id]. This is not currently implemented since it's always a Method.
// If we got this far, the inputs are valid.
- // Shuffle the byte[] back into a raw closure, then allocate it, copy, and return it.
- BoxedClosurePointerType boxed_closure_as_array =
- down_cast<BoxedClosurePointerType>(boxed_closure_object);
+ // Write out the java.lang.reflect.Method's embedded ArtMethod* into the vreg target.
+ mirror::AbstractMethod* boxed_closure_as_method =
+ down_cast<mirror::AbstractMethod*>(boxed_closure_object);
- const int8_t* unaligned_interior_closure = boxed_closure_as_array->GetData();
-
- // Allocate a copy that can "escape" and copy the closure data into that.
- Closure* unboxed_closure =
- LeakingAllocator::MakeFlexibleInstance<Closure>(self, boxed_closure_as_array->GetLength());
- // TODO: don't just memcpy the closure, it's unsafe when we add references to the mix.
- memcpy(unboxed_closure, unaligned_interior_closure, boxed_closure_as_array->GetLength());
-
- DCHECK_EQ(unboxed_closure->GetSize(), static_cast<size_t>(boxed_closure_as_array->GetLength()));
+ ArtMethod* unboxed_closure = boxed_closure_as_method->GetArtMethod();
+ DCHECK(unboxed_closure != nullptr);
*out_closure = unboxed_closure;
return true;
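With this revert, unboxing reduces to a checked down-cast. A self-contained C++ analog of the restored type check (Object and Method here are toy types, not the mirror:: classes; std::bad_cast plays the role of ClassCastException):

    #include <typeinfo>

    struct Object { virtual ~Object() = default; };
    struct Method : Object {};

    // Analog of the restored UnboxLambda path: verify the dynamic type first,
    // throwing on mismatch the way ThrowClassCastException raises CCE.
    Method* Unbox(Object* boxed) {
      Method* method = dynamic_cast<Method*>(boxed);
      if (method == nullptr) {
        throw std::bad_cast();
      }
      return method;
    }

    int main() {
      Method method;
      return Unbox(&method) == &method ? 0 : 1;
    }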
@@ -196,7 +127,7 @@
BoxTable::ValueType BoxTable::FindBoxedLambda(const ClosureType& closure) const {
auto map_iterator = map_.Find(closure);
if (map_iterator != map_.end()) {
- const std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;
+ const std::pair<ClosureType, ValueType>& key_value_pair = *map_iterator;
const ValueType& value = key_value_pair.second;
DCHECK(!value.IsNull()); // Never store null boxes.
@@ -226,7 +157,7 @@
*/
std::vector<ClosureType> remove_list;
for (auto map_iterator = map_.begin(); map_iterator != map_.end(); ) {
- std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;
+ std::pair<ClosureType, ValueType>& key_value_pair = *map_iterator;
const ValueType& old_value = key_value_pair.second;
@@ -235,15 +166,10 @@
mirror::Object* new_value = visitor->IsMarked(old_value_raw);
if (new_value == nullptr) {
- // The object has been swept away.
const ClosureType& closure = key_value_pair.first;
-
+ // The object has been swept away.
// Delete the entry from the map.
- map_iterator = map_.Erase(map_iterator);
-
- // Clean up the memory by deleting the closure.
- ClosureAllocator::Delete(closure);
-
+ map_iterator = map_.Erase(map_.Find(closure));
} else {
// The object has been moved.
// Update the map.
@@ -282,33 +208,16 @@
new_weaks_condition_.Broadcast(self);
}
-void BoxTable::EmptyFn::MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& item) const {
- item.first = nullptr;
-
- Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
- item.second = ValueType(); // Also clear the GC root.
-}
-
-bool BoxTable::EmptyFn::IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const {
- return item.first == nullptr;
-}
-
-bool BoxTable::EqualsFn::operator()(const UnorderedMapKeyType& lhs,
- const UnorderedMapKeyType& rhs) const {
+bool BoxTable::EqualsFn::operator()(const ClosureType& lhs, const ClosureType& rhs) const {
// Nothing needs this right now, but leave this assertion for later when
// we need to look at the references inside of the closure.
- Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+ if (kIsDebugBuild) {
+ Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+ }
- return lhs->ReferenceEquals(rhs);
-}
-
-size_t BoxTable::HashFn::operator()(const UnorderedMapKeyType& key) const {
- const lambda::Closure* closure = key;
- DCHECK_ALIGNED(closure, alignof(lambda::Closure));
-
- // Need to hold mutator_lock_ before calling into Closure::GetHashCode.
- Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
- return closure->GetHashCode();
+ // TODO: Need rework to use read barriers once closures have references inside of them that can
+ // move. Until then, it's safe to just compare the data inside of it directly.
+ return lhs == rhs;
}
} // namespace lambda
diff --git a/runtime/lambda/box_table.h b/runtime/lambda/box_table.h
index adb7332..9ffda66 100644
--- a/runtime/lambda/box_table.h
+++ b/runtime/lambda/box_table.h
@@ -34,7 +34,6 @@
} // namespace mirror
namespace lambda {
-struct Closure; // forward declaration
/*
* Store a table of boxed lambdas. This is required to maintain object referential equality
@@ -45,7 +44,7 @@
*/
class BoxTable FINAL {
public:
- using ClosureType = art::lambda::Closure*;
+ using ClosureType = art::ArtMethod*;
// Boxes a closure into an object. Returns null and throws an exception on failure.
mirror::Object* BoxLambda(const ClosureType& closure)
@@ -73,9 +72,10 @@
REQUIRES(!Locks::lambda_table_lock_);
BoxTable();
- ~BoxTable();
+ ~BoxTable() = default;
private:
+ // FIXME: This needs to be a GcRoot.
// Explanation:
// - After all threads are suspended (exclusive mutator lock),
// the concurrent-copying GC can move objects from the "from" space to the "to" space.
@@ -97,30 +97,30 @@
void BlockUntilWeaksAllowed()
SHARED_REQUIRES(Locks::lambda_table_lock_);
- // Wrap the Closure into a unique_ptr so that the HashMap can delete its memory automatically.
- using UnorderedMapKeyType = ClosureType;
-
// EmptyFn implementation for art::HashMap
struct EmptyFn {
- void MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& item) const
- NO_THREAD_SAFETY_ANALYSIS; // SHARED_REQUIRES(Locks::mutator_lock_)
-
- bool IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const;
+ void MakeEmpty(std::pair<ClosureType, ValueType>& item) const {
+ item.first = nullptr;
+ }
+ bool IsEmpty(const std::pair<ClosureType, ValueType>& item) const {
+ return item.first == nullptr;
+ }
};
// HashFn implementation for art::HashMap
struct HashFn {
- size_t operator()(const UnorderedMapKeyType& key) const
- NO_THREAD_SAFETY_ANALYSIS; // SHARED_REQUIRES(Locks::mutator_lock_)
+ size_t operator()(const ClosureType& key) const {
+ // TODO(iam): Rewrite hash function when ClosureType is no longer an ArtMethod*
+ return static_cast<size_t>(reinterpret_cast<uintptr_t>(key));
+ }
};
// EqualsFn implementation for art::HashMap
struct EqualsFn {
- bool operator()(const UnorderedMapKeyType& lhs, const UnorderedMapKeyType& rhs) const
- NO_THREAD_SAFETY_ANALYSIS; // SHARED_REQUIRES(Locks::mutator_lock_)
+ bool operator()(const ClosureType& lhs, const ClosureType& rhs) const;
};
- using UnorderedMap = art::HashMap<UnorderedMapKeyType,
+ using UnorderedMap = art::HashMap<ClosureType,
ValueType,
EmptyFn,
HashFn,
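The EmptyFn/HashFn/EqualsFn trio above is art::HashMap's customization surface. The restored HashFn and EqualsFn map directly onto std::unordered_map's Hash and KeyEqual parameters, as in this sketch (EmptyFn has no std:: counterpart because std::unordered_map does not use empty-slot sentinels; Key stands in for ArtMethod):

    #include <cstddef>
    #include <cstdint>
    #include <unordered_map>

    class Key;  // stand-in for ArtMethod; pointers to it never need completeness

    // Identity hash on the pointer value, as in the restored HashFn.
    struct HashFn {
      std::size_t operator()(const Key* key) const {
        return static_cast<std::size_t>(reinterpret_cast<std::uintptr_t>(key));
      }
    };

    // Plain pointer equality, as in the restored EqualsFn.
    struct EqualsFn {
      bool operator()(const Key* lhs, const Key* rhs) const { return lhs == rhs; }
    };

    using BoxMap = std::unordered_map<const Key*, int, HashFn, EqualsFn>;

    int main() {
      BoxMap map;
      return static_cast<int>(map.size());
    }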
diff --git a/runtime/lambda/closure.cc b/runtime/lambda/closure.cc
index 179e4ee..95a17c6 100644
--- a/runtime/lambda/closure.cc
+++ b/runtime/lambda/closure.cc
@@ -124,55 +124,6 @@
memcpy(target, this, GetSize());
}
-ArtMethod* Closure::GetTargetMethod() const {
- return const_cast<ArtMethod*>(lambda_info_->GetArtMethod());
-}
-
-uint32_t Closure::GetHashCode() const {
- // Start with a non-zero constant, a prime number.
- uint32_t result = 17;
-
- // Include the hash with the ArtMethod.
- {
- uintptr_t method = reinterpret_cast<uintptr_t>(GetTargetMethod());
- result = 31 * result + Low32Bits(method);
- if (sizeof(method) == sizeof(uint64_t)) {
- result = 31 * result + High32Bits(method);
- }
- }
-
- // Include a hash for each captured variable.
- for (size_t i = 0; i < GetCapturedVariablesSize(); ++i) {
- // TODO: not safe for GC-able values since the address can move and the hash code would change.
- uint8_t captured_variable_raw_value;
- CopyUnsafeAtOffset<uint8_t>(i, /*out*/&captured_variable_raw_value); // NOLINT: [whitespace/comma] [3]
-
- result = 31 * result + captured_variable_raw_value;
- }
-
- // TODO: Fix above loop to work for objects and lambdas.
- static_assert(kClosureSupportsGarbageCollection == false,
- "Need to update above loop to read the hash code from the "
- "objects and lambdas recursively");
-
- return result;
-}
-
-bool Closure::ReferenceEquals(const Closure* other) const {
- DCHECK(other != nullptr);
-
- // TODO: Need rework to use read barriers once closures have references inside of them that can
- // move. Until then, it's safe to just compare the data inside of it directly.
- static_assert(kClosureSupportsReferences == false,
- "Unsafe to use memcmp in read barrier collector");
-
- if (GetSize() != other->GetSize()) {
- return false;
- }
-
- return memcmp(this, other, GetSize());
-}
-
size_t Closure::GetNumberOfCapturedVariables() const {
// TODO: refactor into art_lambda_method.h. Parsing should only be required here as a DCHECK.
VariableInfo variable_info =
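For reference, the hashing scheme deleted from closure.cc above is the classic Java-style 31-multiplier combine. A standalone reconstruction following the removed code (a sketch, not the original; the byte-wise loop has the same GC-unsafety the removed TODO warned about):

    #include <cstddef>
    #include <cstdint>

    static uint32_t Low32Bits(uintptr_t value) {
      return static_cast<uint32_t>(value);
    }

    static uint32_t High32Bits(uintptr_t value) {
      return static_cast<uint32_t>(static_cast<uint64_t>(value) >> 32);
    }

    uint32_t HashClosure(uintptr_t target_method, const uint8_t* captured, size_t count) {
      uint32_t result = 17;  // start with a non-zero constant, a prime number
      result = 31 * result + Low32Bits(target_method);
      if (sizeof(target_method) == sizeof(uint64_t)) {
        result = 31 * result + High32Bits(target_method);
      }
      for (size_t i = 0; i < count; ++i) {
        result = 31 * result + captured[i];  // one step per captured byte
      }
      return result;
    }

    int main() {
      const uint8_t vars[] = {1, 2, 3};
      return HashClosure(0x1234, vars, sizeof(vars)) != 0 ? 0 : 1;
    }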
diff --git a/runtime/lambda/closure.h b/runtime/lambda/closure.h
index 31ff194..60d117e 100644
--- a/runtime/lambda/closure.h
+++ b/runtime/lambda/closure.h
@@ -49,19 +49,6 @@
// The target_size must be at least as large as GetSize().
void CopyTo(void* target, size_t target_size) const;
- // Get the target method, i.e. the method that will be dispatched into with invoke-lambda.
- ArtMethod* GetTargetMethod() const;
-
- // Calculates the hash code. Value is recomputed each time.
- uint32_t GetHashCode() const SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Is this the same closure as other? e.g. same target method, same variables captured.
- //
- // Determines whether the two Closures are interchangeable instances.
- // Does *not* call Object#equals recursively. If ReferenceEquals returns true for two
- // Closures, they are interchangeable values (usually for the purpose of boxing/unboxing).
- bool ReferenceEquals(const Closure* other) const SHARED_REQUIRES(Locks::mutator_lock_);
-
// How many variables were captured?
size_t GetNumberOfCapturedVariables() const;
diff --git a/runtime/lambda/closure_builder-inl.h b/runtime/lambda/closure_builder-inl.h
index 3cec21f..41a803b 100644
--- a/runtime/lambda/closure_builder-inl.h
+++ b/runtime/lambda/closure_builder-inl.h
@@ -35,8 +35,6 @@
values_.push_back(value_storage);
size_ += sizeof(T);
-
- shorty_types_ += kShortyType;
}
} // namespace lambda
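The hunk above drops the shorty bookkeeping from the primitive-capture path; what remains is the value storage, shown here as a self-contained sketch (Builder and the uint64_t slot are illustrative stand-ins for ClosureBuilder and ShortyFieldTypeTraits::MaxType):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    class Builder {
     public:
      template <typename T>
      void CaptureVariablePrimitive(T value) {
        static_assert(sizeof(T) <= sizeof(uint64_t), "must fit the widest slot");
        uint64_t value_storage = 0;  // stand-in for ShortyFieldTypeTraits::MaxType
        std::memcpy(&value_storage, &value, sizeof(T));  // widen into a fixed slot
        values_.push_back(value_storage);
        size_ += sizeof(T);  // the closure payload grows by the value's true size
      }

     private:
      std::vector<uint64_t> values_;
      std::size_t size_ = 0;
    };

    int main() {
      Builder builder;
      builder.CaptureVariablePrimitive<int>(42);
      return 0;
    }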
diff --git a/runtime/lambda/closure_builder.cc b/runtime/lambda/closure_builder.cc
index 739e965..9c37db8 100644
--- a/runtime/lambda/closure_builder.cc
+++ b/runtime/lambda/closure_builder.cc
@@ -64,8 +64,6 @@
UNIMPLEMENTED(FATAL) << "can't yet safely capture objects with read barrier";
}
}
-
- shorty_types_ += ShortyFieldType::kObject;
}
void ClosureBuilder::CaptureVariableLambda(Closure* closure) {
@@ -80,8 +78,6 @@
// A closure may be sized dynamically, so always query it for the true size.
size_ += closure->GetSize();
-
- shorty_types_ += ShortyFieldType::kLambda;
}
size_t ClosureBuilder::GetSize() const {
@@ -89,15 +85,9 @@
}
size_t ClosureBuilder::GetCaptureCount() const {
- DCHECK_EQ(values_.size(), shorty_types_.size());
return values_.size();
}
-const std::string& ClosureBuilder::GetCapturedVariableShortyTypes() const {
- DCHECK_EQ(values_.size(), shorty_types_.size());
- return shorty_types_;
-}
-
Closure* ClosureBuilder::CreateInPlace(void* memory, ArtLambdaMethod* target_method) const {
DCHECK(memory != nullptr);
DCHECK(target_method != nullptr);
@@ -148,14 +138,11 @@
size_t variables_size) const {
size_t total_size = header_size;
const char* shorty_types = target_method->GetCapturedVariablesShortyTypeDescriptor();
- DCHECK_STREQ(shorty_types, shorty_types_.c_str());
size_t variables_offset = 0;
size_t remaining_size = variables_size;
const size_t shorty_count = target_method->GetNumberOfCapturedVariables();
- DCHECK_EQ(shorty_count, GetCaptureCount());
-
for (size_t i = 0; i < shorty_count; ++i) {
ShortyFieldType shorty{shorty_types[i]}; // NOLINT [readability/braces] [4]
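For context, the shorty_types_ string removed throughout this file accumulated one shorty character per capture so it could be cross-checked against the target method's descriptor, as the deleted DCHECK_STREQ above did. A minimal sketch of that bookkeeping ('I' and 'L' are the standard int and object shorty characters; '\\' as the lambda character is an assumption based on ShortyFieldType::kLambda):

    #include <string>

    class ShortyTracker {
     public:
      void CapturePrimitive(char shorty_char) { shorty_types_ += shorty_char; }  // e.g. 'I'
      void CaptureObject() { shorty_types_ += 'L'; }
      void CaptureLambda() { shorty_types_ += '\\'; }  // assumed lambda shorty char

      const std::string& GetCapturedVariableShortyTypes() const { return shorty_types_; }

     private:
      std::string shorty_types_;
    };

    int main() {
      ShortyTracker tracker;
      tracker.CapturePrimitive('I');
      tracker.CaptureObject();
      return tracker.GetCapturedVariableShortyTypes() == "IL" ? 0 : 1;
    }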
diff --git a/runtime/lambda/closure_builder.h b/runtime/lambda/closure_builder.h
index 23eb484..542e12a 100644
--- a/runtime/lambda/closure_builder.h
+++ b/runtime/lambda/closure_builder.h
@@ -40,12 +40,13 @@
//
// The mutator lock must be held for the duration of the lifetime of this object,
// since it needs to temporarily store heap references into an internal list.
-class ClosureBuilder {
+class ClosureBuilder : ValueObject {
public:
using ShortyTypeEnum = decltype(ShortyFieldType::kByte);
+
// Mark this primitive value to be captured as the specified type.
- template <typename T, ShortyTypeEnum kShortyType = ShortyFieldTypeSelectEnum<T>::value>
+ template <typename T, ShortyTypeEnum kShortyType>
void CaptureVariablePrimitive(T value);
// Mark this object reference to be captured.
@@ -62,9 +63,6 @@
// Returns how many variables have been captured so far.
size_t GetCaptureCount() const;
- // Get the list of captured variables' shorty field types.
- const std::string& GetCapturedVariableShortyTypes() const;
-
// Creates a closure in-place and writes out the data into 'memory'.
// Memory must be at least 'GetSize' bytes large.
// All previously marked data to be captured is now written out.
@@ -95,7 +93,6 @@
size_t size_ = kInitialSize;
bool is_dynamic_size_ = false;
std::vector<ShortyFieldTypeTraits::MaxType> values_;
- std::string shorty_types_;
};
} // namespace lambda
diff --git a/runtime/lambda/leaking_allocator.cc b/runtime/lambda/leaking_allocator.cc
deleted file mode 100644
index 4910732..0000000
--- a/runtime/lambda/leaking_allocator.cc
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "lambda/leaking_allocator.h"
-#include "linear_alloc.h"
-#include "runtime.h"
-
-namespace art {
-namespace lambda {
-
-void* LeakingAllocator::AllocateMemory(Thread* self, size_t byte_size) {
- // TODO: use GetAllocatorForClassLoader to allocate lambda ArtMethod data.
- return Runtime::Current()->GetLinearAlloc()->Alloc(self, byte_size);
-}
-
-} // namespace lambda
-} // namespace art
diff --git a/runtime/lambda/leaking_allocator.h b/runtime/lambda/leaking_allocator.h
deleted file mode 100644
index c3222d0..0000000
--- a/runtime/lambda/leaking_allocator.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef ART_RUNTIME_LAMBDA_LEAKING_ALLOCATOR_H_
-#define ART_RUNTIME_LAMBDA_LEAKING_ALLOCATOR_H_
-
-#include <utility> // std::forward
-
-namespace art {
-class Thread; // forward declaration
-
-namespace lambda {
-
-// Temporary class to centralize all the leaking allocations.
-// Allocations made through this class are never freed, but it is a placeholder
-// that means that the calling code needs to be rewritten to properly:
-//
-// (a) Have a lifetime scoped to some other entity.
-// (b) Not be allocated over and over again if it was already allocated once (immutable data).
-//
-// TODO: do all of the above a/b for each callsite, and delete this class.
-class LeakingAllocator {
- public:
- // Allocate byte_size bytes worth of memory. Never freed.
- static void* AllocateMemory(Thread* self, size_t byte_size);
-
- // Make a new instance of T, flexibly sized, in-place at newly allocated memory. Never freed.
- template <typename T, typename... Args>
- static T* MakeFlexibleInstance(Thread* self, size_t byte_size, Args&&... args) {
- return new (AllocateMemory(self, byte_size)) T(std::forward<Args>(args)...);
- }
-
- // Make a new instance of T in-place at newly allocated memory. Never freed.
- template <typename T, typename... Args>
- static T* MakeInstance(Thread* self, Args&&... args) {
- return new (AllocateMemory(self, sizeof(T))) T(std::forward<Args>(args)...);
- }
-};
-
-} // namespace lambda
-} // namespace art
-
-#endif // ART_RUNTIME_LAMBDA_LEAKING_ALLOCATOR_H_
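The deleted LeakingAllocator is a thin placement-new wrapper over a bump allocator. A standalone analog, with malloc standing in for LinearAlloc::Alloc; as the deleted comment warns, nothing is ever freed:

    #include <cstddef>
    #include <cstdlib>
    #include <new>
    #include <utility>

    // Make a new instance of T, flexibly sized, at newly allocated memory.
    // Never freed. (Null check omitted in this sketch.)
    template <typename T, typename... Args>
    T* MakeFlexibleInstance(std::size_t byte_size, Args&&... args) {
      void* memory = std::malloc(byte_size);  // stand-in for LinearAlloc::Alloc
      return new (memory) T(std::forward<Args>(args)...);
    }

    int main() {
      struct Header { int tag; };
      Header* header = MakeFlexibleInstance<Header>(sizeof(Header) + 32, Header{42});
      return header->tag == 42 ? 0 : 1;
    }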