Revert "resolved conflicts for 70808827 to stage-aosp-master"
This reverts commit 5cf2d5c9cc7d3a582154c93ebc4fd7fc7584ded0.
Change-Id: Ifcffff034d4610c0d318cf67b0d98a28551c9605
diff --git a/include/utils/BasicHashtable.h b/include/utils/BasicHashtable.h
new file mode 100644
index 0000000..cf47059
--- /dev/null
+++ b/include/utils/BasicHashtable.h
@@ -0,0 +1,395 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_BASIC_HASHTABLE_H
+#define ANDROID_BASIC_HASHTABLE_H
+
+#include <stdint.h>
+#include <sys/types.h>
+#include <utils/TypeHelpers.h>
+
+namespace android {
+
+/* Implementation type. Nothing to see here. */
+class BasicHashtableImpl {
+protected:
+ struct Bucket {
+        // The collision flag indicates that the bucket is part of a collision chain
+        // in which at least two entries hash to this bucket. When true, we
+        // may need to seek further along the chain to find the entry.
+ static const uint32_t COLLISION = 0x80000000UL;
+
+ // The present flag indicates that the bucket contains an initialized entry value.
+ static const uint32_t PRESENT = 0x40000000UL;
+
+ // Mask for 30 bits worth of the hash code that are stored within the bucket to
+ // speed up lookups and rehashing by eliminating the need to recalculate the
+ // hash code of the entry's key.
+ static const uint32_t HASH_MASK = 0x3fffffffUL;
+
+ // Combined value that stores the collision and present flags as well as
+ // a 30 bit hash code.
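+        // For example, a present bucket in the middle of a collision chain whose
+        // trimmed hash is 0x2a would hold cookie == COLLISION | PRESENT | 0x2a.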
+ uint32_t cookie;
+
+ // Storage for the entry begins here.
+ char entry[0];
+ };
+
+ BasicHashtableImpl(size_t entrySize, bool hasTrivialDestructor,
+ size_t minimumInitialCapacity, float loadFactor);
+ BasicHashtableImpl(const BasicHashtableImpl& other);
+ virtual ~BasicHashtableImpl();
+
+ void dispose();
+ void edit();
+ void setTo(const BasicHashtableImpl& other);
+ void clear();
+
+ ssize_t next(ssize_t index) const;
+ ssize_t find(ssize_t index, hash_t hash, const void* __restrict__ key) const;
+ size_t add(hash_t hash, const void* __restrict__ entry);
+ void removeAt(size_t index);
+ void rehash(size_t minimumCapacity, float loadFactor);
+
+ const size_t mBucketSize; // number of bytes per bucket including the entry
+ const bool mHasTrivialDestructor; // true if the entry type does not require destruction
+ size_t mCapacity; // number of buckets that can be filled before exceeding load factor
+ float mLoadFactor; // load factor
+ size_t mSize; // number of elements actually in the table
+ size_t mFilledBuckets; // number of buckets for which collision or present is true
+ size_t mBucketCount; // number of slots in the mBuckets array
+ void* mBuckets; // array of buckets, as a SharedBuffer
+
+ inline const Bucket& bucketAt(const void* __restrict__ buckets, size_t index) const {
+ return *reinterpret_cast<const Bucket*>(
+ static_cast<const uint8_t*>(buckets) + index * mBucketSize);
+ }
+
+ inline Bucket& bucketAt(void* __restrict__ buckets, size_t index) const {
+ return *reinterpret_cast<Bucket*>(static_cast<uint8_t*>(buckets) + index * mBucketSize);
+ }
+
+ virtual bool compareBucketKey(const Bucket& bucket, const void* __restrict__ key) const = 0;
+ virtual void initializeBucketEntry(Bucket& bucket, const void* __restrict__ entry) const = 0;
+ virtual void destroyBucketEntry(Bucket& bucket) const = 0;
+
+private:
+ void clone();
+
+ // Allocates a bucket array as a SharedBuffer.
+ void* allocateBuckets(size_t count) const;
+
+ // Releases a bucket array's associated SharedBuffer.
+ void releaseBuckets(void* __restrict__ buckets, size_t count) const;
+
+ // Destroys the contents of buckets (invokes destroyBucketEntry for each
+ // populated bucket if needed).
+ void destroyBuckets(void* __restrict__ buckets, size_t count) const;
+
+ // Copies the content of buckets (copies the cookie and invokes copyBucketEntry
+ // for each populated bucket if needed).
+ void copyBuckets(const void* __restrict__ fromBuckets,
+ void* __restrict__ toBuckets, size_t count) const;
+
+ // Determines the appropriate size of a bucket array to store a certain minimum
+ // number of entries and returns its effective capacity.
+ static void determineCapacity(size_t minimumCapacity, float loadFactor,
+ size_t* __restrict__ outBucketCount, size_t* __restrict__ outCapacity);
+
+    // Trims a hash code to 30 bits to match what we store in the bucket's cookie.
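+    // The two high bits are XOR-folded into the low bits rather than discarded,
+    // so their entropy still influences bucket selection.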
+ inline static hash_t trimHash(hash_t hash) {
+ return (hash & Bucket::HASH_MASK) ^ (hash >> 30);
+ }
+
+ // Returns the index of the first bucket that is in the collision chain
+ // for the specified hash code, given the total number of buckets.
+ // (Primary hash)
+ inline static size_t chainStart(hash_t hash, size_t count) {
+ return hash % count;
+ }
+
+ // Returns the increment to add to a bucket index to seek to the next bucket
+ // in the collision chain for the specified hash code, given the total number of buckets.
+ // (Secondary hash)
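+    // Because bucket counts are drawn from a table of primes (see PRIMES in
+    // BasicHashtable.cpp), any increment in [1, count - 1] is coprime to count,
+    // so the probe sequence visits every bucket before repeating.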
+ inline static size_t chainIncrement(hash_t hash, size_t count) {
+ return ((hash >> 7) | (hash << 25)) % (count - 1) + 1;
+ }
+
+ // Returns the index of the next bucket that is in the collision chain
+ // that is defined by the specified increment, given the total number of buckets.
+ inline static size_t chainSeek(size_t index, size_t increment, size_t count) {
+ return (index + increment) % count;
+ }
+};
+
+/*
+ * A BasicHashtable stores entries that are indexed by hash code in place
+ * within an array. The basic operations are finding entries by key,
+ * adding new entries and removing existing entries.
+ *
+ * This class provides a very limited set of operations with simple semantics.
+ * It is intended to be used as a building block to construct more complex
+ * and interesting data structures such as HashMap. Think very hard before
+ * adding anything extra to BasicHashtable, it probably belongs at a
+ * higher level of abstraction.
+ *
+ * TKey: The key type.
+ * TEntry: The entry type which is what is actually stored in the array.
+ *
+ * TKey must support the following contract:
+ * bool operator==(const TKey& other) const; // return true if equal
+ * bool operator!=(const TKey& other) const; // return true if unequal
+ *
+ * TEntry must support the following contract:
+ * const TKey& getKey() const; // get the key from the entry
+ *
+ * This class supports storing entries with duplicate keys. Of course, it can't
+ * tell them apart during removal, so only the first matching entry will be removed.
+ * We do this because it means that operations like add() can't fail.
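+ *
+ * A minimal usage sketch (assuming key_value_pair_t from TypeHelpers.h as the
+ * entry type, which provides getKey()):
+ *
+ *     typedef key_value_pair_t<int, int> Entry;
+ *     BasicHashtable<int, Entry> table;
+ *     size_t added = table.add(hash_type(5), Entry(5, 50));
+ *     ssize_t found = table.find(-1, hash_type(5), 5); // same index as added
+ *     table.removeAt(found);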
+ */
+template <typename TKey, typename TEntry>
+class BasicHashtable : private BasicHashtableImpl {
+public:
+ /* Creates a hashtable with the specified minimum initial capacity.
+ * The underlying array will be created when the first entry is added.
+ *
+ * minimumInitialCapacity: The minimum initial capacity for the hashtable.
+ * Default is 0.
+ * loadFactor: The desired load factor for the hashtable, in the range (0, 1].
+ * Default is 0.75.
+ */
+ BasicHashtable(size_t minimumInitialCapacity = 0, float loadFactor = 0.75f);
+
+ /* Copies a hashtable.
+ * The underlying storage is shared copy-on-write.
+ */
+ BasicHashtable(const BasicHashtable& other);
+
+ /* Clears and destroys the hashtable.
+ */
+ virtual ~BasicHashtable();
+
+    /* Makes this hashtable a copy of the other hashtable.
+ * The underlying storage is shared copy-on-write.
+ *
+ * other: The hashtable to copy.
+ */
+    inline BasicHashtable<TKey, TEntry>& operator =(const BasicHashtable<TKey, TEntry>& other) {
+ setTo(other);
+ return *this;
+ }
+
+ /* Returns the number of entries in the hashtable.
+ */
+ inline size_t size() const {
+ return mSize;
+ }
+
+    /* Returns the capacity of the hashtable, which is the number of elements that can
+     * be added to the hashtable without requiring it to be grown.
+ */
+ inline size_t capacity() const {
+ return mCapacity;
+ }
+
+ /* Returns the number of buckets that the hashtable has, which is the size of its
+ * underlying array.
+ */
+ inline size_t bucketCount() const {
+ return mBucketCount;
+ }
+
+ /* Returns the load factor of the hashtable. */
+ inline float loadFactor() const {
+ return mLoadFactor;
+    }
+
+ /* Returns a const reference to the entry at the specified index.
+ *
+ * index: The index of the entry to retrieve. Must be a valid index within
+ * the bounds of the hashtable.
+ */
+ inline const TEntry& entryAt(size_t index) const {
+ return entryFor(bucketAt(mBuckets, index));
+ }
+
+ /* Returns a non-const reference to the entry at the specified index.
+ *
+ * index: The index of the entry to edit. Must be a valid index within
+ * the bounds of the hashtable.
+ */
+ inline TEntry& editEntryAt(size_t index) {
+ edit();
+ return entryFor(bucketAt(mBuckets, index));
+ }
+
+ /* Clears the hashtable.
+ * All entries in the hashtable are destroyed immediately.
+ * If you need to do something special with the entries in the hashtable then iterate
+ * over them and do what you need before clearing the hashtable.
+ */
+ inline void clear() {
+ BasicHashtableImpl::clear();
+ }
+
+ /* Returns the index of the next entry in the hashtable given the index of a previous entry.
+ * If the given index is -1, then returns the index of the first entry in the hashtable,
+ * if there is one, or -1 otherwise.
+ * If the given index is not -1, then returns the index of the next entry in the hashtable,
+ * in strictly increasing order, or -1 if there are none left.
+ *
+ * index: The index of the previous entry that was iterated, or -1 to begin
+ * iteration at the beginning of the hashtable.
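+     *
+     * Typical iteration over all entries (sketch):
+     *
+     *     for (ssize_t i = -1; (i = table.next(i)) != -1; ) {
+     *         // use table.entryAt(i)
+     *     }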
+ */
+ inline ssize_t next(ssize_t index) const {
+ return BasicHashtableImpl::next(index);
+ }
+
+ /* Finds the index of an entry with the specified key.
+ * If the given index is -1, then returns the index of the first matching entry,
+ * otherwise returns the index of the next matching entry.
+ * If the hashtable contains multiple entries with keys that match the requested
+ * key, then the sequence of entries returned is arbitrary.
+ * Returns -1 if no entry was found.
+ *
+ * index: The index of the previous entry with the specified key, or -1 to
+ * find the first matching entry.
+ * hash: The hashcode of the key.
+ * key: The key.
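+     *
+     * A sketch of visiting every entry that matches a key:
+     *
+     *     for (ssize_t i = table.find(-1, hash, key); i >= 0;
+     *             i = table.find(i, hash, key)) {
+     *         // use table.entryAt(i)
+     *     }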
+ */
+ inline ssize_t find(ssize_t index, hash_t hash, const TKey& key) const {
+ return BasicHashtableImpl::find(index, hash, &key);
+ }
+
+ /* Adds the entry to the hashtable.
+ * Returns the index of the newly added entry.
+ * If an entry with the same key already exists, then a duplicate entry is added.
+ * If the entry will not fit, then the hashtable's capacity is increased and
+ * its contents are rehashed. See rehash().
+ *
+ * hash: The hashcode of the key.
+ * entry: The entry to add.
+ */
+ inline size_t add(hash_t hash, const TEntry& entry) {
+ return BasicHashtableImpl::add(hash, &entry);
+ }
+
+ /* Removes the entry with the specified index from the hashtable.
+ * The entry is destroyed immediately.
+ * The index must be valid.
+ *
+ * The hashtable is not compacted after an item is removed, so it is legal
+ * to continue iterating over the hashtable using next() or find().
+ *
+ * index: The index of the entry to remove. Must be a valid index within the
+ * bounds of the hashtable, and it must refer to an existing entry.
+ */
+ inline void removeAt(size_t index) {
+ BasicHashtableImpl::removeAt(index);
+ }
+
+ /* Rehashes the contents of the hashtable.
+ * Grows the hashtable to at least the specified minimum capacity or the
+ * current number of elements, whichever is larger.
+ *
+ * Rehashing causes all entries to be copied and the entry indices may change.
+ * Although the hash codes are cached by the hashtable, rehashing can be an
+ * expensive operation and should be avoided unless the hashtable's size
+ * needs to be changed.
+ *
+ * Rehashing is the only way to change the capacity or load factor of the
+ * hashtable once it has been created. It can be used to compact the
+ * hashtable by choosing a minimum capacity that is smaller than the current
+ * capacity (such as 0).
+ *
+ * minimumCapacity: The desired minimum capacity after rehashing.
+ * loadFactor: The desired load factor after rehashing.
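+     *
+     * For example, table.rehash(0, table.loadFactor()) compacts the hashtable
+     * to the smallest size that fits its current contents.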
+ */
+ inline void rehash(size_t minimumCapacity, float loadFactor) {
+ BasicHashtableImpl::rehash(minimumCapacity, loadFactor);
+ }
+
+ /* Determines whether there is room to add another entry without rehashing.
+ * When this returns true, a subsequent add() operation is guaranteed to
+ * complete without performing a rehash.
+ */
+ inline bool hasMoreRoom() const {
+ return mCapacity > mFilledBuckets;
+ }
+
+protected:
+ static inline const TEntry& entryFor(const Bucket& bucket) {
+ return reinterpret_cast<const TEntry&>(bucket.entry);
+ }
+
+ static inline TEntry& entryFor(Bucket& bucket) {
+ return reinterpret_cast<TEntry&>(bucket.entry);
+ }
+
+ virtual bool compareBucketKey(const Bucket& bucket, const void* __restrict__ key) const;
+ virtual void initializeBucketEntry(Bucket& bucket, const void* __restrict__ entry) const;
+ virtual void destroyBucketEntry(Bucket& bucket) const;
+
+private:
+ // For dumping the raw contents of a hashtable during testing.
+ friend class BasicHashtableTest;
+ inline uint32_t cookieAt(size_t index) const {
+ return bucketAt(mBuckets, index).cookie;
+ }
+};
+
+template <typename TKey, typename TEntry>
+BasicHashtable<TKey, TEntry>::BasicHashtable(size_t minimumInitialCapacity, float loadFactor) :
+ BasicHashtableImpl(sizeof(TEntry), traits<TEntry>::has_trivial_dtor,
+ minimumInitialCapacity, loadFactor) {
+}
+
+template <typename TKey, typename TEntry>
+BasicHashtable<TKey, TEntry>::BasicHashtable(const BasicHashtable<TKey, TEntry>& other) :
+ BasicHashtableImpl(other) {
+}
+
+template <typename TKey, typename TEntry>
+BasicHashtable<TKey, TEntry>::~BasicHashtable() {
+ dispose();
+}
+
+template <typename TKey, typename TEntry>
+bool BasicHashtable<TKey, TEntry>::compareBucketKey(const Bucket& bucket,
+ const void* __restrict__ key) const {
+ return entryFor(bucket).getKey() == *static_cast<const TKey*>(key);
+}
+
+template <typename TKey, typename TEntry>
+void BasicHashtable<TKey, TEntry>::initializeBucketEntry(Bucket& bucket,
+ const void* __restrict__ entry) const {
+ if (!traits<TEntry>::has_trivial_copy) {
+ new (&entryFor(bucket)) TEntry(*(static_cast<const TEntry*>(entry)));
+ } else {
+ memcpy(&entryFor(bucket), entry, sizeof(TEntry));
+ }
+}
+
+template <typename TKey, typename TEntry>
+void BasicHashtable<TKey, TEntry>::destroyBucketEntry(Bucket& bucket) const {
+ if (!traits<TEntry>::has_trivial_dtor) {
+ entryFor(bucket).~TEntry();
+ }
+}
+
+} // namespace android
+
+#endif // ANDROID_BASIC_HASHTABLE_H
diff --git a/libutils/Android.mk b/libutils/Android.mk
index 8c4fd15..9f3b1df 100644
--- a/libutils/Android.mk
+++ b/libutils/Android.mk
@@ -15,6 +15,7 @@
LOCAL_PATH:= $(call my-dir)
commonSources:= \
+ BasicHashtable.cpp \
CallStack.cpp \
FileMap.cpp \
JenkinsHash.cpp \
diff --git a/libutils/BasicHashtable.cpp b/libutils/BasicHashtable.cpp
new file mode 100644
index 0000000..1e9f053
--- /dev/null
+++ b/libutils/BasicHashtable.cpp
@@ -0,0 +1,350 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "BasicHashtable"
+
+#include <math.h>
+
+#include <utils/Log.h>
+#include <utils/BasicHashtable.h>
+#include <utils/misc.h>
+
+#include "SharedBuffer.h"
+
+namespace android {
+
+BasicHashtableImpl::BasicHashtableImpl(size_t entrySize, bool hasTrivialDestructor,
+ size_t minimumInitialCapacity, float loadFactor) :
+ mBucketSize(entrySize + sizeof(Bucket)), mHasTrivialDestructor(hasTrivialDestructor),
+ mLoadFactor(loadFactor), mSize(0),
+ mFilledBuckets(0), mBuckets(NULL) {
+ determineCapacity(minimumInitialCapacity, mLoadFactor, &mBucketCount, &mCapacity);
+}
+
+BasicHashtableImpl::BasicHashtableImpl(const BasicHashtableImpl& other) :
+ mBucketSize(other.mBucketSize), mHasTrivialDestructor(other.mHasTrivialDestructor),
+ mCapacity(other.mCapacity), mLoadFactor(other.mLoadFactor),
+ mSize(other.mSize), mFilledBuckets(other.mFilledBuckets),
+ mBucketCount(other.mBucketCount), mBuckets(other.mBuckets) {
+ if (mBuckets) {
+ SharedBuffer::bufferFromData(mBuckets)->acquire();
+ }
+}
+
+BasicHashtableImpl::~BasicHashtableImpl()
+{
+}
+
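+// Called before mutating the bucket array: if the storage is shared with
+// another hashtable, clone it first so the change stays local (copy-on-write).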
+void BasicHashtableImpl::edit() {
+ if (mBuckets && !SharedBuffer::bufferFromData(mBuckets)->onlyOwner()) {
+ clone();
+ }
+}
+
+void BasicHashtableImpl::dispose() {
+ if (mBuckets) {
+ releaseBuckets(mBuckets, mBucketCount);
+ }
+}
+
+void BasicHashtableImpl::clone() {
+ if (mBuckets) {
+ void* newBuckets = allocateBuckets(mBucketCount);
+ copyBuckets(mBuckets, newBuckets, mBucketCount);
+ releaseBuckets(mBuckets, mBucketCount);
+ mBuckets = newBuckets;
+ }
+}
+
+void BasicHashtableImpl::setTo(const BasicHashtableImpl& other) {
+ if (mBuckets) {
+ releaseBuckets(mBuckets, mBucketCount);
+ }
+
+ mCapacity = other.mCapacity;
+ mLoadFactor = other.mLoadFactor;
+ mSize = other.mSize;
+ mFilledBuckets = other.mFilledBuckets;
+ mBucketCount = other.mBucketCount;
+ mBuckets = other.mBuckets;
+
+ if (mBuckets) {
+ SharedBuffer::bufferFromData(mBuckets)->acquire();
+ }
+}
+
+void BasicHashtableImpl::clear() {
+ if (mBuckets) {
+ if (mFilledBuckets) {
+ SharedBuffer* sb = SharedBuffer::bufferFromData(mBuckets);
+ if (sb->onlyOwner()) {
+ destroyBuckets(mBuckets, mBucketCount);
+ for (size_t i = 0; i < mBucketCount; i++) {
+ Bucket& bucket = bucketAt(mBuckets, i);
+ bucket.cookie = 0;
+ }
+ } else {
+ releaseBuckets(mBuckets, mBucketCount);
+ mBuckets = NULL;
+ }
+ mFilledBuckets = 0;
+ }
+ mSize = 0;
+ }
+}
+
+ssize_t BasicHashtableImpl::next(ssize_t index) const {
+ if (mSize) {
+ while (size_t(++index) < mBucketCount) {
+ const Bucket& bucket = bucketAt(mBuckets, index);
+ if (bucket.cookie & Bucket::PRESENT) {
+ return index;
+ }
+ }
+ }
+ return -1;
+}
+
+ssize_t BasicHashtableImpl::find(ssize_t index, hash_t hash,
+ const void* __restrict__ key) const {
+ if (!mSize) {
+ return -1;
+ }
+
+ hash = trimHash(hash);
+ if (index < 0) {
+ index = chainStart(hash, mBucketCount);
+
+ const Bucket& bucket = bucketAt(mBuckets, size_t(index));
+ if (bucket.cookie & Bucket::PRESENT) {
+ if (compareBucketKey(bucket, key)) {
+ return index;
+ }
+ } else {
+ if (!(bucket.cookie & Bucket::COLLISION)) {
+ return -1;
+ }
+ }
+ }
+
+ size_t inc = chainIncrement(hash, mBucketCount);
+ for (;;) {
+ index = chainSeek(index, inc, mBucketCount);
+
+ const Bucket& bucket = bucketAt(mBuckets, size_t(index));
+ if (bucket.cookie & Bucket::PRESENT) {
+ if ((bucket.cookie & Bucket::HASH_MASK) == hash
+ && compareBucketKey(bucket, key)) {
+ return index;
+ }
+ }
+ if (!(bucket.cookie & Bucket::COLLISION)) {
+ return -1;
+ }
+ }
+}
+
+size_t BasicHashtableImpl::add(hash_t hash, const void* entry) {
+ if (!mBuckets) {
+ mBuckets = allocateBuckets(mBucketCount);
+ } else {
+ edit();
+ }
+
+ hash = trimHash(hash);
+ for (;;) {
+ size_t index = chainStart(hash, mBucketCount);
+ Bucket* bucket = &bucketAt(mBuckets, size_t(index));
+ if (bucket->cookie & Bucket::PRESENT) {
+ size_t inc = chainIncrement(hash, mBucketCount);
+ do {
+ bucket->cookie |= Bucket::COLLISION;
+ index = chainSeek(index, inc, mBucketCount);
+ bucket = &bucketAt(mBuckets, size_t(index));
+ } while (bucket->cookie & Bucket::PRESENT);
+ }
+
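+        // Preserve any existing collision flag: other chains may pass through
+        // this bucket even though no entry currently occupies it.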
+ uint32_t collision = bucket->cookie & Bucket::COLLISION;
+ if (!collision) {
+ if (mFilledBuckets >= mCapacity) {
+ rehash(mCapacity * 2, mLoadFactor);
+ continue;
+ }
+ mFilledBuckets += 1;
+ }
+
+ bucket->cookie = collision | Bucket::PRESENT | hash;
+ mSize += 1;
+ initializeBucketEntry(*bucket, entry);
+ return index;
+ }
+}
+
+void BasicHashtableImpl::removeAt(size_t index) {
+ edit();
+
+ Bucket& bucket = bucketAt(mBuckets, index);
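+    // Clear the present flag but deliberately leave the collision flag set, so
+    // that collision chains passing through this bucket remain searchable.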
+ bucket.cookie &= ~Bucket::PRESENT;
+ if (!(bucket.cookie & Bucket::COLLISION)) {
+ mFilledBuckets -= 1;
+ }
+ mSize -= 1;
+ if (!mHasTrivialDestructor) {
+ destroyBucketEntry(bucket);
+ }
+}
+
+void BasicHashtableImpl::rehash(size_t minimumCapacity, float loadFactor) {
+ if (minimumCapacity < mSize) {
+ minimumCapacity = mSize;
+ }
+ size_t newBucketCount, newCapacity;
+ determineCapacity(minimumCapacity, loadFactor, &newBucketCount, &newCapacity);
+
+ if (newBucketCount != mBucketCount || newCapacity != mCapacity) {
+ if (mBuckets) {
+ void* newBuckets;
+ if (mSize) {
+ newBuckets = allocateBuckets(newBucketCount);
+ for (size_t i = 0; i < mBucketCount; i++) {
+ const Bucket& fromBucket = bucketAt(mBuckets, i);
+ if (fromBucket.cookie & Bucket::PRESENT) {
+ hash_t hash = fromBucket.cookie & Bucket::HASH_MASK;
+ size_t index = chainStart(hash, newBucketCount);
+ Bucket* toBucket = &bucketAt(newBuckets, size_t(index));
+ if (toBucket->cookie & Bucket::PRESENT) {
+ size_t inc = chainIncrement(hash, newBucketCount);
+ do {
+ toBucket->cookie |= Bucket::COLLISION;
+ index = chainSeek(index, inc, newBucketCount);
+ toBucket = &bucketAt(newBuckets, size_t(index));
+ } while (toBucket->cookie & Bucket::PRESENT);
+ }
+ toBucket->cookie = Bucket::PRESENT | hash;
+ initializeBucketEntry(*toBucket, fromBucket.entry);
+ }
+ }
+ } else {
+ newBuckets = NULL;
+ }
+ releaseBuckets(mBuckets, mBucketCount);
+ mBuckets = newBuckets;
+ mFilledBuckets = mSize;
+ }
+ mBucketCount = newBucketCount;
+ mCapacity = newCapacity;
+ }
+ mLoadFactor = loadFactor;
+}
+
+void* BasicHashtableImpl::allocateBuckets(size_t count) const {
+ size_t bytes = count * mBucketSize;
+ SharedBuffer* sb = SharedBuffer::alloc(bytes);
+ LOG_ALWAYS_FATAL_IF(!sb, "Could not allocate %u bytes for hashtable with %u buckets.",
+ uint32_t(bytes), uint32_t(count));
+ void* buckets = sb->data();
+ for (size_t i = 0; i < count; i++) {
+ Bucket& bucket = bucketAt(buckets, i);
+ bucket.cookie = 0;
+ }
+ return buckets;
+}
+
+void BasicHashtableImpl::releaseBuckets(void* __restrict__ buckets, size_t count) const {
+ SharedBuffer* sb = SharedBuffer::bufferFromData(buckets);
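+    // Release with eKeepStorage so that, when this was the last reference, the
+    // entry destructors can run before the storage is finally freed.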
+ if (sb->release(SharedBuffer::eKeepStorage) == 1) {
+ destroyBuckets(buckets, count);
+ SharedBuffer::dealloc(sb);
+ }
+}
+
+void BasicHashtableImpl::destroyBuckets(void* __restrict__ buckets, size_t count) const {
+ if (!mHasTrivialDestructor) {
+ for (size_t i = 0; i < count; i++) {
+ Bucket& bucket = bucketAt(buckets, i);
+ if (bucket.cookie & Bucket::PRESENT) {
+ destroyBucketEntry(bucket);
+ }
+ }
+ }
+}
+
+void BasicHashtableImpl::copyBuckets(const void* __restrict__ fromBuckets,
+ void* __restrict__ toBuckets, size_t count) const {
+ for (size_t i = 0; i < count; i++) {
+ const Bucket& fromBucket = bucketAt(fromBuckets, i);
+ Bucket& toBucket = bucketAt(toBuckets, i);
+ toBucket.cookie = fromBucket.cookie;
+ if (fromBucket.cookie & Bucket::PRESENT) {
+ initializeBucketEntry(toBucket, fromBucket.entry);
+ }
+ }
+}
+
+// Table of 31-bit primes where each prime is no less than twice as large
+// as the previous one. Generated by "primes.py".
+static size_t PRIMES[] = {
+ 5,
+ 11,
+ 23,
+ 47,
+ 97,
+ 197,
+ 397,
+ 797,
+ 1597,
+ 3203,
+ 6421,
+ 12853,
+ 25717,
+ 51437,
+ 102877,
+ 205759,
+ 411527,
+ 823117,
+ 1646237,
+ 3292489,
+ 6584983,
+ 13169977,
+ 26339969,
+ 52679969,
+ 105359939,
+ 210719881,
+ 421439783,
+ 842879579,
+ 1685759167,
+ 0,
+};
+
+void BasicHashtableImpl::determineCapacity(size_t minimumCapacity, float loadFactor,
+ size_t* __restrict__ outBucketCount, size_t* __restrict__ outCapacity) {
+ LOG_ALWAYS_FATAL_IF(loadFactor <= 0.0f || loadFactor > 1.0f,
+ "Invalid load factor %0.3f. Must be in the range (0, 1].", loadFactor);
+
+ size_t count = ceilf(minimumCapacity / loadFactor) + 1;
+ size_t i = 0;
+    // The bounds check must come first so the scan cannot read past the end of
+    // PRIMES; the trailing 0 sentinel then trips the fatal check below.
+    while (i < NELEM(PRIMES) - 1 && count > PRIMES[i]) {
+ i++;
+ }
+ count = PRIMES[i];
+ LOG_ALWAYS_FATAL_IF(!count, "Could not determine required number of buckets for "
+ "hashtable with minimum capacity %u and load factor %0.3f.",
+ uint32_t(minimumCapacity), loadFactor);
+ *outBucketCount = count;
+ *outCapacity = ceilf((count - 1) * loadFactor);
+}
+
+} // namespace android
diff --git a/libutils/tests/Android.mk b/libutils/tests/Android.mk
index 47415ab..d4a45fd 100644
--- a/libutils/tests/Android.mk
+++ b/libutils/tests/Android.mk
@@ -22,6 +22,7 @@
LOCAL_MODULE := libutils_tests
LOCAL_SRC_FILES := \
+ BasicHashtable_test.cpp \
BlobCache_test.cpp \
BitSet_test.cpp \
Looper_test.cpp \
diff --git a/libutils/tests/BasicHashtable_test.cpp b/libutils/tests/BasicHashtable_test.cpp
new file mode 100644
index 0000000..4b3a717
--- /dev/null
+++ b/libutils/tests/BasicHashtable_test.cpp
@@ -0,0 +1,582 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "BasicHashtable_test"
+
+#include <utils/BasicHashtable.h>
+#include <cutils/log.h>
+#include <gtest/gtest.h>
+#include <unistd.h>
+
+namespace {
+
+typedef int SimpleKey;
+typedef int SimpleValue;
+typedef android::key_value_pair_t<SimpleKey, SimpleValue> SimpleEntry;
+typedef android::BasicHashtable<SimpleKey, SimpleEntry> SimpleHashtable;
+
+struct ComplexKey {
+ int k;
+
+ explicit ComplexKey(int k) : k(k) {
+ instanceCount += 1;
+ }
+
+ ComplexKey(const ComplexKey& other) : k(other.k) {
+ instanceCount += 1;
+ }
+
+ ~ComplexKey() {
+ instanceCount -= 1;
+ }
+
+ bool operator ==(const ComplexKey& other) const {
+ return k == other.k;
+ }
+
+ bool operator !=(const ComplexKey& other) const {
+ return k != other.k;
+ }
+
+ static ssize_t instanceCount;
+};
+
+ssize_t ComplexKey::instanceCount = 0;
+
+struct ComplexValue {
+ int v;
+
+ explicit ComplexValue(int v) : v(v) {
+ instanceCount += 1;
+ }
+
+ ComplexValue(const ComplexValue& other) : v(other.v) {
+ instanceCount += 1;
+ }
+
+ ~ComplexValue() {
+ instanceCount -= 1;
+ }
+
+ static ssize_t instanceCount;
+};
+
+ssize_t ComplexValue::instanceCount = 0;
+
+} // namespace
+
+
+namespace android {
+
+typedef key_value_pair_t<ComplexKey, ComplexValue> ComplexEntry;
+typedef BasicHashtable<ComplexKey, ComplexEntry> ComplexHashtable;
+
+template<> inline hash_t hash_type(const ComplexKey& value) {
+ return hash_type(value.k);
+}
+
+class BasicHashtableTest : public testing::Test {
+protected:
+ virtual void SetUp() {
+ ComplexKey::instanceCount = 0;
+ ComplexValue::instanceCount = 0;
+ }
+
+ virtual void TearDown() {
+ ASSERT_NO_FATAL_FAILURE(assertInstanceCount(0, 0));
+ }
+
+ void assertInstanceCount(ssize_t keys, ssize_t values) {
+ if (keys != ComplexKey::instanceCount || values != ComplexValue::instanceCount) {
+ FAIL() << "Expected " << keys << " keys and " << values << " values "
+ "but there were actually " << ComplexKey::instanceCount << " keys and "
+ << ComplexValue::instanceCount << " values";
+ }
+ }
+
+public:
+ template <typename TKey, typename TEntry>
+ static void cookieAt(const BasicHashtable<TKey, TEntry>& h, size_t index,
+ bool* collision, bool* present, hash_t* hash) {
+ uint32_t cookie = h.cookieAt(index);
+ *collision = cookie & BasicHashtable<TKey, TEntry>::Bucket::COLLISION;
+ *present = cookie & BasicHashtable<TKey, TEntry>::Bucket::PRESENT;
+ *hash = cookie & BasicHashtable<TKey, TEntry>::Bucket::HASH_MASK;
+ }
+
+ template <typename TKey, typename TEntry>
+ static const void* getBuckets(const BasicHashtable<TKey, TEntry>& h) {
+ return h.mBuckets;
+ }
+};
+
+template <typename TKey, typename TValue>
+static size_t add(BasicHashtable<TKey, key_value_pair_t<TKey, TValue> >& h,
+ const TKey& key, const TValue& value) {
+ return h.add(hash_type(key), key_value_pair_t<TKey, TValue>(key, value));
+}
+
+template <typename TKey, typename TValue>
+static ssize_t find(BasicHashtable<TKey, key_value_pair_t<TKey, TValue> >& h,
+ ssize_t index, const TKey& key) {
+ return h.find(index, hash_type(key), key);
+}
+
+template <typename TKey, typename TValue>
+static bool remove(BasicHashtable<TKey, key_value_pair_t<TKey, TValue> >& h,
+ const TKey& key) {
+ ssize_t index = find(h, -1, key);
+ if (index >= 0) {
+ h.removeAt(index);
+ return true;
+ }
+ return false;
+}
+
+template <typename TEntry>
+static void getKeyValue(const TEntry& entry, int* key, int* value);
+
+template <> void getKeyValue(const SimpleEntry& entry, int* key, int* value) {
+ *key = entry.key;
+ *value = entry.value;
+}
+
+template <> void getKeyValue(const ComplexEntry& entry, int* key, int* value) {
+ *key = entry.key.k;
+ *value = entry.value.v;
+}
+
+template <typename TKey, typename TValue>
+static void dump(BasicHashtable<TKey, key_value_pair_t<TKey, TValue> >& h) {
+ ALOGD("hashtable %p, size=%u, capacity=%u, bucketCount=%u",
+ &h, h.size(), h.capacity(), h.bucketCount());
+ for (size_t i = 0; i < h.bucketCount(); i++) {
+ bool collision, present;
+ hash_t hash;
+ BasicHashtableTest::cookieAt(h, i, &collision, &present, &hash);
+ if (present) {
+ int key, value;
+ getKeyValue(h.entryAt(i), &key, &value);
+ ALOGD(" [%3u] = collision=%d, present=%d, hash=0x%08x, key=%3d, value=%3d, "
+ "hash_type(key)=0x%08x",
+ i, collision, present, hash, key, value, hash_type(key));
+ } else {
+ ALOGD(" [%3u] = collision=%d, present=%d",
+ i, collision, present);
+ }
+ }
+}
+
+TEST_F(BasicHashtableTest, DefaultConstructor_WithDefaultProperties) {
+ SimpleHashtable h;
+
+ EXPECT_EQ(0U, h.size());
+ EXPECT_EQ(3U, h.capacity());
+ EXPECT_EQ(5U, h.bucketCount());
+ EXPECT_EQ(0.75f, h.loadFactor());
+}
+
+TEST_F(BasicHashtableTest, Constructor_WithNonUnityLoadFactor) {
+ SimpleHashtable h(52, 0.8f);
+
+ EXPECT_EQ(0U, h.size());
+ EXPECT_EQ(77U, h.capacity());
+ EXPECT_EQ(97U, h.bucketCount());
+ EXPECT_EQ(0.8f, h.loadFactor());
+}
+
+TEST_F(BasicHashtableTest, Constructor_WithUnityLoadFactorAndExactCapacity) {
+ SimpleHashtable h(46, 1.0f);
+
+ EXPECT_EQ(0U, h.size());
+ EXPECT_EQ(46U, h.capacity()); // must be one less than bucketCount because loadFactor == 1.0f
+ EXPECT_EQ(47U, h.bucketCount());
+ EXPECT_EQ(1.0f, h.loadFactor());
+}
+
+TEST_F(BasicHashtableTest, Constructor_WithUnityLoadFactorAndInexactCapacity) {
+ SimpleHashtable h(42, 1.0f);
+
+ EXPECT_EQ(0U, h.size());
+ EXPECT_EQ(46U, h.capacity()); // must be one less than bucketCount because loadFactor == 1.0f
+ EXPECT_EQ(47U, h.bucketCount());
+ EXPECT_EQ(1.0f, h.loadFactor());
+}
+
+TEST_F(BasicHashtableTest, FindAddFindRemoveFind_OneEntry) {
+ SimpleHashtable h;
+ ssize_t index = find(h, -1, 8);
+ ASSERT_EQ(-1, index);
+
+ index = add(h, 8, 1);
+ ASSERT_EQ(1U, h.size());
+
+ ASSERT_EQ(index, find(h, -1, 8));
+ ASSERT_EQ(8, h.entryAt(index).key);
+ ASSERT_EQ(1, h.entryAt(index).value);
+
+ index = find(h, index, 8);
+ ASSERT_EQ(-1, index);
+
+ ASSERT_TRUE(remove(h, 8));
+ ASSERT_EQ(0U, h.size());
+
+ index = find(h, -1, 8);
+ ASSERT_EQ(-1, index);
+}
+
+TEST_F(BasicHashtableTest, FindAddFindRemoveFind_MultipleEntryWithUniqueKey) {
+ const size_t N = 11;
+
+ SimpleHashtable h;
+ for (size_t i = 0; i < N; i++) {
+ ssize_t index = find(h, -1, int(i));
+ ASSERT_EQ(-1, index);
+
+ index = add(h, int(i), int(i * 10));
+ ASSERT_EQ(i + 1, h.size());
+
+ ASSERT_EQ(index, find(h, -1, int(i)));
+ ASSERT_EQ(int(i), h.entryAt(index).key);
+ ASSERT_EQ(int(i * 10), h.entryAt(index).value);
+
+ index = find(h, index, int(i));
+ ASSERT_EQ(-1, index);
+ }
+
+ for (size_t i = N; --i > 0; ) {
+ ASSERT_TRUE(remove(h, int(i))) << "i = " << i;
+ ASSERT_EQ(i, h.size());
+
+ ssize_t index = find(h, -1, int(i));
+ ASSERT_EQ(-1, index);
+ }
+}
+
+TEST_F(BasicHashtableTest, FindAddFindRemoveFind_MultipleEntryWithDuplicateKey) {
+ const size_t N = 11;
+ const int K = 1;
+
+ SimpleHashtable h;
+ for (size_t i = 0; i < N; i++) {
+ ssize_t index = find(h, -1, K);
+ if (i == 0) {
+ ASSERT_EQ(-1, index);
+ } else {
+ ASSERT_NE(-1, index);
+ }
+
+ add(h, K, int(i));
+ ASSERT_EQ(i + 1, h.size());
+
+ index = -1;
+ int values = 0;
+ for (size_t j = 0; j <= i; j++) {
+ index = find(h, index, K);
+ ASSERT_GE(index, 0);
+ ASSERT_EQ(K, h.entryAt(index).key);
+ values |= 1 << h.entryAt(index).value;
+ }
+ ASSERT_EQ(values, (1 << (i + 1)) - 1);
+
+ index = find(h, index, K);
+ ASSERT_EQ(-1, index);
+ }
+
+ for (size_t i = N; --i > 0; ) {
+ ASSERT_TRUE(remove(h, K)) << "i = " << i;
+ ASSERT_EQ(i, h.size());
+
+ ssize_t index = -1;
+ for (size_t j = 0; j < i; j++) {
+ index = find(h, index, K);
+ ASSERT_GE(index, 0);
+ ASSERT_EQ(K, h.entryAt(index).key);
+ }
+
+ index = find(h, index, K);
+ ASSERT_EQ(-1, index);
+ }
+}
+
+TEST_F(BasicHashtableTest, Clear_WhenAlreadyEmpty_DoesNothing) {
+ SimpleHashtable h;
+ h.clear();
+
+ EXPECT_EQ(0U, h.size());
+ EXPECT_EQ(3U, h.capacity());
+ EXPECT_EQ(5U, h.bucketCount());
+ EXPECT_EQ(0.75f, h.loadFactor());
+}
+
+TEST_F(BasicHashtableTest, Clear_AfterElementsAdded_RemovesThem) {
+ SimpleHashtable h;
+ add(h, 0, 0);
+ add(h, 1, 0);
+ h.clear();
+
+ EXPECT_EQ(0U, h.size());
+ EXPECT_EQ(3U, h.capacity());
+ EXPECT_EQ(5U, h.bucketCount());
+ EXPECT_EQ(0.75f, h.loadFactor());
+}
+
+TEST_F(BasicHashtableTest, Clear_AfterElementsAdded_DestroysThem) {
+ ComplexHashtable h;
+ add(h, ComplexKey(0), ComplexValue(0));
+ add(h, ComplexKey(1), ComplexValue(0));
+ ASSERT_NO_FATAL_FAILURE(assertInstanceCount(2, 2));
+
+ h.clear();
+ ASSERT_NO_FATAL_FAILURE(assertInstanceCount(0, 0));
+
+ EXPECT_EQ(0U, h.size());
+ EXPECT_EQ(3U, h.capacity());
+ EXPECT_EQ(5U, h.bucketCount());
+ EXPECT_EQ(0.75f, h.loadFactor());
+}
+
+TEST_F(BasicHashtableTest, Remove_AfterElementsAdded_DestroysThem) {
+ ComplexHashtable h;
+ add(h, ComplexKey(0), ComplexValue(0));
+ add(h, ComplexKey(1), ComplexValue(0));
+ ASSERT_NO_FATAL_FAILURE(assertInstanceCount(2, 2));
+
+ ASSERT_TRUE(remove(h, ComplexKey(0)));
+ ASSERT_NO_FATAL_FAILURE(assertInstanceCount(1, 1));
+
+ ASSERT_TRUE(remove(h, ComplexKey(1)));
+ ASSERT_NO_FATAL_FAILURE(assertInstanceCount(0, 0));
+
+ EXPECT_EQ(0U, h.size());
+ EXPECT_EQ(3U, h.capacity());
+ EXPECT_EQ(5U, h.bucketCount());
+ EXPECT_EQ(0.75f, h.loadFactor());
+}
+
+TEST_F(BasicHashtableTest, Destructor_AfterElementsAdded_DestroysThem) {
+ {
+ ComplexHashtable h;
+ add(h, ComplexKey(0), ComplexValue(0));
+ add(h, ComplexKey(1), ComplexValue(0));
+ ASSERT_NO_FATAL_FAILURE(assertInstanceCount(2, 2));
+ } // h is destroyed here
+
+ ASSERT_NO_FATAL_FAILURE(assertInstanceCount(0, 0));
+}
+
+TEST_F(BasicHashtableTest, Next_WhenEmpty_ReturnsMinusOne) {
+ SimpleHashtable h;
+
+ ASSERT_EQ(-1, h.next(-1));
+}
+
+TEST_F(BasicHashtableTest, Next_WhenNonEmpty_IteratesOverAllEntries) {
+ const int N = 88;
+
+ SimpleHashtable h;
+ for (int i = 0; i < N; i++) {
+ add(h, i, i * 10);
+ }
+
+ bool set[N];
+ memset(set, 0, sizeof(bool) * N);
+ int count = 0;
+ for (ssize_t index = -1; (index = h.next(index)) != -1; ) {
+ ASSERT_GE(index, 0);
+ ASSERT_LT(size_t(index), h.bucketCount());
+
+ const SimpleEntry& entry = h.entryAt(index);
+ ASSERT_GE(entry.key, 0);
+ ASSERT_LT(entry.key, N);
+ ASSERT_FALSE(set[entry.key]);
+ ASSERT_EQ(entry.key * 10, entry.value);
+
+ set[entry.key] = true;
+ count += 1;
+ }
+ ASSERT_EQ(N, count);
+}
+
+TEST_F(BasicHashtableTest, Add_RehashesOnDemand) {
+ SimpleHashtable h;
+ size_t initialCapacity = h.capacity();
+ size_t initialBucketCount = h.bucketCount();
+
+ for (size_t i = 0; i < initialCapacity; i++) {
+ add(h, int(i), 0);
+ }
+
+ EXPECT_EQ(initialCapacity, h.size());
+ EXPECT_EQ(initialCapacity, h.capacity());
+ EXPECT_EQ(initialBucketCount, h.bucketCount());
+
+ add(h, -1, -1);
+
+ EXPECT_EQ(initialCapacity + 1, h.size());
+ EXPECT_GT(h.capacity(), initialCapacity);
+ EXPECT_GT(h.bucketCount(), initialBucketCount);
+ EXPECT_GT(h.bucketCount(), h.capacity());
+}
+
+TEST_F(BasicHashtableTest, Rehash_WhenCapacityAndBucketCountUnchanged_DoesNothing) {
+ ComplexHashtable h;
+ add(h, ComplexKey(0), ComplexValue(0));
+ const void* oldBuckets = getBuckets(h);
+ ASSERT_NE((void*)NULL, oldBuckets);
+ ASSERT_NO_FATAL_FAILURE(assertInstanceCount(1, 1));
+
+ h.rehash(h.capacity(), h.loadFactor());
+
+ ASSERT_EQ(oldBuckets, getBuckets(h));
+ ASSERT_NO_FATAL_FAILURE(assertInstanceCount(1, 1));
+}
+
+TEST_F(BasicHashtableTest, Rehash_WhenEmptyAndHasNoBuckets_UpdatesCapacityButDoesNotAllocateBuckets) {
+ ComplexHashtable h;
+ ASSERT_EQ((void*)NULL, getBuckets(h));
+ ASSERT_NO_FATAL_FAILURE(assertInstanceCount(0, 0));
+
+ h.rehash(9, 1.0f);
+
+ EXPECT_EQ(0U, h.size());
+ EXPECT_EQ(10U, h.capacity());
+ EXPECT_EQ(11U, h.bucketCount());
+ EXPECT_EQ(1.0f, h.loadFactor());
+ EXPECT_EQ((void*)NULL, getBuckets(h));
+ ASSERT_NO_FATAL_FAILURE(assertInstanceCount(0, 0));
+}
+
+TEST_F(BasicHashtableTest, Rehash_WhenEmptyAndHasBuckets_ReleasesBucketsAndSetsCapacity) {
+ ComplexHashtable h(10);
+ add(h, ComplexKey(0), ComplexValue(0));
+ ASSERT_TRUE(remove(h, ComplexKey(0)));
+ ASSERT_NE((void*)NULL, getBuckets(h));
+ ASSERT_NO_FATAL_FAILURE(assertInstanceCount(0, 0));
+
+ h.rehash(0, 0.75f);
+
+ EXPECT_EQ(0U, h.size());
+ EXPECT_EQ(3U, h.capacity());
+ EXPECT_EQ(5U, h.bucketCount());
+ EXPECT_EQ(0.75f, h.loadFactor());
+ EXPECT_EQ((void*)NULL, getBuckets(h));
+ ASSERT_NO_FATAL_FAILURE(assertInstanceCount(0, 0));
+}
+
+TEST_F(BasicHashtableTest, Rehash_WhenLessThanCurrentCapacity_ShrinksBuckets) {
+ ComplexHashtable h(10);
+ add(h, ComplexKey(0), ComplexValue(0));
+ add(h, ComplexKey(1), ComplexValue(1));
+ const void* oldBuckets = getBuckets(h);
+ ASSERT_NO_FATAL_FAILURE(assertInstanceCount(2, 2));
+
+ h.rehash(0, 0.75f);
+
+ EXPECT_EQ(2U, h.size());
+ EXPECT_EQ(3U, h.capacity());
+ EXPECT_EQ(5U, h.bucketCount());
+ EXPECT_EQ(0.75f, h.loadFactor());
+ EXPECT_NE(oldBuckets, getBuckets(h));
+ ASSERT_NO_FATAL_FAILURE(assertInstanceCount(2, 2));
+}
+
+TEST_F(BasicHashtableTest, CopyOnWrite) {
+ ComplexHashtable h1;
+ add(h1, ComplexKey(0), ComplexValue(0));
+ add(h1, ComplexKey(1), ComplexValue(1));
+ const void* originalBuckets = getBuckets(h1);
+ ASSERT_NO_FATAL_FAILURE(assertInstanceCount(2, 2));
+ ssize_t index0 = find(h1, -1, ComplexKey(0));
+ EXPECT_GE(index0, 0);
+
+ // copy constructor acquires shared reference
+ ComplexHashtable h2(h1);
+ ASSERT_NO_FATAL_FAILURE(assertInstanceCount(2, 2));
+ ASSERT_EQ(originalBuckets, getBuckets(h2));
+ EXPECT_EQ(h1.size(), h2.size());
+ EXPECT_EQ(h1.capacity(), h2.capacity());
+ EXPECT_EQ(h1.bucketCount(), h2.bucketCount());
+ EXPECT_EQ(h1.loadFactor(), h2.loadFactor());
+ EXPECT_EQ(index0, find(h2, -1, ComplexKey(0)));
+
+ // operator= acquires shared reference
+ ComplexHashtable h3;
+ h3 = h2;
+ ASSERT_NO_FATAL_FAILURE(assertInstanceCount(2, 2));
+ ASSERT_EQ(originalBuckets, getBuckets(h3));
+ EXPECT_EQ(h1.size(), h3.size());
+ EXPECT_EQ(h1.capacity(), h3.capacity());
+ EXPECT_EQ(h1.bucketCount(), h3.bucketCount());
+ EXPECT_EQ(h1.loadFactor(), h3.loadFactor());
+ EXPECT_EQ(index0, find(h3, -1, ComplexKey(0)));
+
+ // editEntryAt copies shared contents
+ h1.editEntryAt(index0).value.v = 42;
+ ASSERT_NO_FATAL_FAILURE(assertInstanceCount(4, 4));
+ ASSERT_NE(originalBuckets, getBuckets(h1));
+ EXPECT_EQ(42, h1.entryAt(index0).value.v);
+ EXPECT_EQ(0, h2.entryAt(index0).value.v);
+ EXPECT_EQ(0, h3.entryAt(index0).value.v);
+
+ // clear releases reference to shared contents
+ h2.clear();
+ ASSERT_NO_FATAL_FAILURE(assertInstanceCount(4, 4));
+ EXPECT_EQ(0U, h2.size());
+ ASSERT_NE(originalBuckets, getBuckets(h2));
+
+ // operator= acquires shared reference, destroys unshared contents
+ h1 = h3;
+ ASSERT_NO_FATAL_FAILURE(assertInstanceCount(2, 2));
+ ASSERT_EQ(originalBuckets, getBuckets(h1));
+ EXPECT_EQ(h3.size(), h1.size());
+ EXPECT_EQ(h3.capacity(), h1.capacity());
+ EXPECT_EQ(h3.bucketCount(), h1.bucketCount());
+ EXPECT_EQ(h3.loadFactor(), h1.loadFactor());
+ EXPECT_EQ(index0, find(h1, -1, ComplexKey(0)));
+
+ // add copies shared contents
+ add(h1, ComplexKey(2), ComplexValue(2));
+ ASSERT_NO_FATAL_FAILURE(assertInstanceCount(5, 5));
+ ASSERT_NE(originalBuckets, getBuckets(h1));
+ EXPECT_EQ(3U, h1.size());
+ EXPECT_EQ(0U, h2.size());
+ EXPECT_EQ(2U, h3.size());
+
+ // remove copies shared contents
+ h1 = h3;
+ ASSERT_NO_FATAL_FAILURE(assertInstanceCount(2, 2));
+ ASSERT_EQ(originalBuckets, getBuckets(h1));
+ h1.removeAt(index0);
+ ASSERT_NO_FATAL_FAILURE(assertInstanceCount(3, 3));
+ ASSERT_NE(originalBuckets, getBuckets(h1));
+ EXPECT_EQ(1U, h1.size());
+ EXPECT_EQ(0U, h2.size());
+ EXPECT_EQ(2U, h3.size());
+
+ // rehash copies shared contents
+ h1 = h3;
+ ASSERT_NO_FATAL_FAILURE(assertInstanceCount(2, 2));
+ ASSERT_EQ(originalBuckets, getBuckets(h1));
+ h1.rehash(10, 1.0f);
+ ASSERT_NO_FATAL_FAILURE(assertInstanceCount(4, 4));
+ ASSERT_NE(originalBuckets, getBuckets(h1));
+ EXPECT_EQ(2U, h1.size());
+ EXPECT_EQ(0U, h2.size());
+ EXPECT_EQ(2U, h3.size());
+}
+
+} // namespace android