runtime: Add lambda box/unbox object equality

A lambda that is boxed with box-lambda is now stored as a weak reference
in a global runtime table (lambda::BoxTable). Repeatedly boxing the same
lambda closure value now always returns the same java.lang.Object.

Since managed code has no way to observe the address of an object, the
GC is free to sweep any dead boxed lambdas out of the table; the sweep
can also shrink the table to keep its memory use from growing too much.

(Note that a lambda closure is immutable, so it is safe to use it as a
hash key.)
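
For illustration, the intended behavior in managed code looks roughly
like this (a sketch; the Functional interface name is just a placeholder,
as in the comments in BoxLambda below):

    Functional f = () -> 5;   // vF = create-lambda
    Object a = f;             // vA = box-lambda vF
    Object b = f;             // vB = box-lambda vF
    assert a == b;            // repeated boxing returns the same box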

Change-Id: I786c1323ff14eed937936b303d511875f9642524
diff --git a/runtime/lambda/box_table.cc b/runtime/lambda/box_table.cc
new file mode 100644
index 0000000..64a6076
--- /dev/null
+++ b/runtime/lambda/box_table.cc
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "lambda/box_table.h"
+
+#include "base/mutex.h"
+#include "common_throws.h"
+#include "gc_root-inl.h"
+#include "mirror/method.h"
+#include "mirror/object-inl.h"
+#include "thread.h"
+
+#include <vector>
+
+namespace art {
+namespace lambda {
+
+BoxTable::BoxTable()
+  : allow_new_weaks_(true),
+    new_weaks_condition_("lambda box table allowed weaks", *Locks::lambda_table_lock_) {}
+
+mirror::Object* BoxTable::BoxLambda(const ClosureType& closure) {
+  Thread* self = Thread::Current();
+
+  {
+    // TODO: Switch to ReaderMutexLock if ConditionVariable ever supports RW Mutexes
+    /*Reader*/MutexLock mu(self, *Locks::lambda_table_lock_);
+    BlockUntilWeaksAllowed();
+
+    // Attempt to look up this object, it's possible it was already boxed previously.
+    // If this is the case we *must* return the same object as before to maintain
+    // referential equality.
+    //
+    // In managed code:
+    //   Functional f = () -> 5;  // vF = create-lambda
+    //   Object a = f;            // vA = box-lambda vF
+    //   Object b = f;            // vB = box-lambda vF
+    //   assert(a == b);
+    ValueType value = FindBoxedLambda(closure);
+    if (!value.IsNull()) {
+      return value.Read();
+    }
+
+    // Otherwise we need to box the closure ourselves and insert it into the hash map.
+  }
+
+  // Release the lambda table lock here, so that thread suspension is allowed.
+
+  // Convert the ArtMethod into a java.lang.reflect.Method which will serve
+  // as the temporary 'boxed' version of the lambda. This is good enough
+  // to check all the basic object identities that a boxed lambda must retain.
+
+  // TODO: Boxing an innate lambda (i.e. made with create-lambda) should make a proxy class
+  // TODO: Boxing a learned lambda (i.e. made with unbox-lambda) should return the original object
+  mirror::Method* method_as_object =
+      mirror::Method::CreateFromArtMethod(self, closure);
+  // There are no thread suspension points after this, so we don't need to put it into a handle.
+
+  if (UNLIKELY(method_as_object == nullptr)) {
+    // Most likely an OOM has occurred.
+    CHECK(self->IsExceptionPending());
+    return nullptr;
+  }
+
+  // The method has been successfully boxed into an object, now insert it into the hash map.
+  {
+    MutexLock mu(self, *Locks::lambda_table_lock_);
+    BlockUntilWeaksAllowed();
+
+    // Lookup the object again, it's possible another thread already boxed it while
+    // we were allocating the object before.
+    ValueType value = FindBoxedLambda(closure);
+    if (UNLIKELY(!value.IsNull())) {
+      // Let the GC clean up method_as_object at a later time.
+      return value.Read();
+    }
+
+    // Otherwise we should insert it into the hash map in this thread.
+    map_.Insert(std::make_pair(closure, ValueType(method_as_object)));
+  }
+
+  return method_as_object;
+}
+
+bool BoxTable::UnboxLambda(mirror::Object* object, ClosureType* out_closure) {
+  DCHECK(object != nullptr);
+  *out_closure = nullptr;
+
+  // Note that we do not need to access lambda_table_lock_ here
+  // since we don't need to look at the map.
+
+  mirror::Object* boxed_closure_object = object;
+
+  // Raise ClassCastException if object is not instanceof java.lang.reflect.Method
+  if (UNLIKELY(!boxed_closure_object->InstanceOf(mirror::Method::StaticClass()))) {
+    ThrowClassCastException(mirror::Method::StaticClass(), boxed_closure_object->GetClass());
+    return false;
+  }
+
+  // TODO(iam): We must check that the closure object extends/implements the type
+  // specified in [type id]. This is not currently implemented since it's always a Method.
+
+  // If we got this far, the inputs are valid.
+  // Write out the java.lang.reflect.Method's embedded ArtMethod* into the vreg target.
+  mirror::AbstractMethod* boxed_closure_as_method =
+      down_cast<mirror::AbstractMethod*>(boxed_closure_object);
+
+  ArtMethod* unboxed_closure = boxed_closure_as_method->GetArtMethod();
+  DCHECK(unboxed_closure != nullptr);
+
+  *out_closure = unboxed_closure;
+  return true;
+}
+
+BoxTable::ValueType BoxTable::FindBoxedLambda(const ClosureType& closure) const {
+  auto map_iterator = map_.Find(closure);
+  if (map_iterator != map_.end()) {
+    const std::pair<ClosureType, ValueType>& key_value_pair = *map_iterator;
+    const ValueType& value = key_value_pair.second;
+
+    DCHECK(!value.IsNull());  // Never store null boxes.
+    return value;
+  }
+
+  return ValueType(nullptr);
+}
+
+void BoxTable::BlockUntilWeaksAllowed() {
+  Thread* self = Thread::Current();
+  while (UNLIKELY(allow_new_weaks_ == false)) {
+    new_weaks_condition_.WaitHoldingLocks(self);  // wait while holding mutator lock
+  }
+}
+
+void BoxTable::SweepWeakBoxedLambdas(IsMarkedVisitor* visitor) {
+  DCHECK(visitor != nullptr);
+
+  Thread* self = Thread::Current();
+  MutexLock mu(self, *Locks::lambda_table_lock_);
+
+  /*
+   * Visit every weak root in our lambda box table.
+   * Remove unmarked objects, update marked objects to new address.
+   */
+  std::vector<ClosureType> remove_list;
+  for (auto map_iterator = map_.begin(); map_iterator != map_.end(); ) {
+    std::pair<ClosureType, ValueType>& key_value_pair = *map_iterator;
+
+    const ValueType& old_value = key_value_pair.second;
+
+    // This does not need a read barrier because this is called by GC.
+    mirror::Object* old_value_raw = old_value.Read<kWithoutReadBarrier>();
+    mirror::Object* new_value = visitor->IsMarked(old_value_raw);
+
+    if (new_value == nullptr) {
+      // The object has been swept away.
+      // Delete the entry from the map; Erase() returns an iterator to the next entry.
+      map_iterator = map_.Erase(map_iterator);
+    } else {
+      // The object has been moved.
+      // Update the map.
+      key_value_pair.second = ValueType(new_value);
+      ++map_iterator;
+    }
+  }
+
+  // Occasionally shrink the map so it does not stay large after many boxes have been swept away.
+  if (map_.CalculateLoadFactor() < kMinimumLoadFactor) {
+    map_.ShrinkToMaximumLoad();
+  }
+}
+
+void BoxTable::DisallowNewWeakBoxedLambdas() {
+  Thread* self = Thread::Current();
+  MutexLock mu(self, *Locks::lambda_table_lock_);
+
+  allow_new_weaks_ = false;
+}
+
+void BoxTable::AllowNewWeakBoxedLambdas() {
+  Thread* self = Thread::Current();
+  MutexLock mu(self, *Locks::lambda_table_lock_);
+
+  allow_new_weaks_ = true;
+  new_weaks_condition_.Broadcast(self);
+}
+
+void BoxTable::EnsureNewWeakBoxedLambdasDisallowed() {
+  Thread* self = Thread::Current();
+  MutexLock mu(self, *Locks::lambda_table_lock_);
+  CHECK(!allow_new_weaks_);
+}
+
+bool BoxTable::EqualsFn::operator()(const ClosureType& lhs, const ClosureType& rhs) const {
+  // Nothing needs this right now, but leave this assertion for later when
+  // we need to look at the references inside of the closure.
+  if (kIsDebugBuild) {
+    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+  }
+
+  // TODO: Need rework to use read barriers once closures have references inside of them that can
+  // move. Until then, it's safe to just compare the data inside of it directly.
+  return lhs == rhs;
+}
+
+}  // namespace lambda
+}  // namespace art
diff --git a/runtime/lambda/box_table.h b/runtime/lambda/box_table.h
new file mode 100644
index 0000000..12d3ff3
--- /dev/null
+++ b/runtime/lambda/box_table.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ART_RUNTIME_LAMBDA_BOX_TABLE_H_
+#define ART_RUNTIME_LAMBDA_BOX_TABLE_H_
+
+#include "base/allocator.h"
+#include "base/hash_map.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "gc_root.h"
+#include "object_callbacks.h"
+
+#include <stdint.h>
+
+namespace art {
+
+class ArtMethod;  // forward declaration
+
+namespace mirror {
+class Object;  // forward declaration
+}  // namespace mirror
+
+namespace lambda {
+
+/*
+ * Store a table of boxed lambdas. This is required to maintain object referential equality
+ * when a lambda is re-boxed.
+ *
+ * Conceptually, we store a mapping of Closures -> Weak Reference<Boxed Lambda Object>.
+ * When too many objects get GC'd, we shrink the underlying table to use less space
+ * (see the usage sketch below).
+ */
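+//
+// A rough sketch of the intended use (illustrative only; 'closure' stands for an
+// already-created lambda closure obtained elsewhere):
+//
+//   lambda::BoxTable box_table;
+//   mirror::Object* boxed = box_table.BoxLambda(closure);
+//   mirror::Object* boxed_again = box_table.BoxLambda(closure);
+//   DCHECK_EQ(boxed, boxed_again);  // Same closure -> same boxed object.
+//
+//   lambda::BoxTable::ClosureType unboxed = nullptr;
+//   if (box_table.UnboxLambda(boxed, &unboxed)) {
+//     DCHECK_EQ(unboxed, closure);  // Unboxing recovers the original closure.
+//   }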
+class BoxTable FINAL {
+ public:
+  using ClosureType = art::ArtMethod*;
+
+  // Boxes a closure into an object. Returns null and throws an exception on failure.
+  mirror::Object* BoxLambda(const ClosureType& closure)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      LOCKS_EXCLUDED(Locks::lambda_table_lock_);
+
+  // Unboxes an object back into the lambda. Returns false and throws an exception on failure.
+  bool UnboxLambda(mirror::Object* object, ClosureType* out_closure)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Sweep weak references to lambda boxes. Update the addresses if the objects have been
+  // moved, and delete them from the table if the objects have been cleaned up.
+  void SweepWeakBoxedLambdas(IsMarkedVisitor* visitor)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      LOCKS_EXCLUDED(Locks::lambda_table_lock_);
+
+  // GC callback: Temporarily block anyone from touching the map.
+  void DisallowNewWeakBoxedLambdas()
+      LOCKS_EXCLUDED(Locks::lambda_table_lock_);
+
+  // GC callback: Unblock any readers who have been queued waiting to touch the map.
+  void AllowNewWeakBoxedLambdas()
+      LOCKS_EXCLUDED(Locks::lambda_table_lock_);
+
+  // GC callback: Verify that the state is now blocking anyone from touching the map.
+  void EnsureNewWeakBoxedLambdasDisallowed()
+      LOCKS_EXCLUDED(Locks::lambda_table_lock_);
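+
+  // The expected calling sequence from the GC is roughly as follows (a sketch,
+  // not a contract enforced by this class):
+  //
+  //   box_table->DisallowNewWeakBoxedLambdas();   // pause: stop publishing new boxes
+  //   box_table->SweepWeakBoxedLambdas(visitor);  // drop dead boxes, update moved ones
+  //   box_table->AllowNewWeakBoxedLambdas();      // resume: wake up blocked BoxLambda calls
+  //
+  // EnsureNewWeakBoxedLambdasDisallowed() may be called in between as a sanity check
+  // that access is still blocked.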
+
+  BoxTable();
+  ~BoxTable() = default;
+
+ private:
+  // Note: the value type must be a GcRoot.
+  // Explanation:
+  // - After all threads are suspended (exclusive mutator lock),
+  //   the concurrent-copying GC can move objects from the "from" space to the "to" space.
+  // If an object is moved at that time and *before* SweepSystemWeaks is called,
+  // then we don't know whether the move has happened yet.
+  // Subsequent reads would then (incorrectly) look at the objects in the "from" space,
+  // which is a problem since the objects have already been forwarded and mutations
+  // would not be visible in the right space.
+  // Instead, use a GcRoot here which will be automatically updated by the GC.
+  //
+  // Also, any reads should be protected by a read barrier to always give us the "to" space address.
+  using ValueType = GcRoot<mirror::Object>;
+
+  // Attempt to look up the lambda in the map, or return null if it's not there yet.
+  ValueType FindBoxedLambda(const ClosureType& closure) const
+      SHARED_LOCKS_REQUIRED(Locks::lambda_table_lock_);
+
+  // If the GC has come in and temporarily disallowed touching weaks, block until it is allowed.
+  void BlockUntilWeaksAllowed()
+      SHARED_LOCKS_REQUIRED(Locks::lambda_table_lock_);
+
+  // EmptyFn implementation for art::HashMap
+  struct EmptyFn {
+    void MakeEmpty(std::pair<ClosureType, ValueType>& item) const {
+      item.first = nullptr;
+    }
+    bool IsEmpty(const std::pair<ClosureType, ValueType>& item) const {
+      return item.first == nullptr;
+    }
+  };
+
+  // HashFn implementation for art::HashMap
+  struct HashFn {
+    size_t operator()(const ClosureType& key) const {
+      // TODO(iam): Rewrite hash function when ClosureType is no longer an ArtMethod*
+      return static_cast<size_t>(reinterpret_cast<uintptr_t>(key));
+    }
+  };
+
+  // EqualsFn implementation for art::HashMap
+  struct EqualsFn {
+    bool operator()(const ClosureType& lhs, const ClosureType& rhs) const;
+  };
+
+  using UnorderedMap = art::HashMap<ClosureType,
+                                    ValueType,
+                                    EmptyFn,
+                                    HashFn,
+                                    EqualsFn,
+                                    TrackingAllocator<std::pair<ClosureType, ValueType>,
+                                                      kAllocatorTagLambdaBoxTable>>;
+
+  UnorderedMap map_                                          GUARDED_BY(Locks::lambda_table_lock_);
+  bool allow_new_weaks_                                      GUARDED_BY(Locks::lambda_table_lock_);
+  ConditionVariable new_weaks_condition_                     GUARDED_BY(Locks::lambda_table_lock_);
+
+  // Shrink the map when we get below this load factor.
+  // (This is an arbitrary value that should be large enough to prevent aggressive map erases
+  // from shrinking the table too often.)
+  static constexpr double kMinimumLoadFactor = UnorderedMap::kDefaultMinLoadFactor / 2;
+
+  DISALLOW_COPY_AND_ASSIGN(BoxTable);
+};
+
+}  // namespace lambda
+}  // namespace art
+
+#endif  // ART_RUNTIME_LAMBDA_BOX_TABLE_H_