GC cleanup.

Greater use of directories and namespaces.
Fix bugs that cause verify options to fail.
Address numerous other issues:

GC barrier waits occurring while holding locks:
GC barrier waits occur when we wait for threads to run the checkpoint function
on themselves. This was happening with the heap bitmap and mutator locks held,
meaning that a thread trying to take either lock exclusively would block
behind a waiting thread. If that blocked thread is one of the threads we are
waiting on to run the checkpoint, the VM deadlocks.
The deadlock went unnoticed because the call that checks for wait safety was
removed in: https://googleplex-android-review.googlesource.com/#/c/249423/1.
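
A minimal, self-contained sketch of the shape of this deadlock, using standard
C++ primitives rather than ART's Locks/ThreadList (illustrative only, not the
actual runtime code):

    #include <barrier>
    #include <mutex>
    #include <shared_mutex>
    #include <thread>

    std::shared_mutex heap_lock;  // Stands in for the heap bitmap/mutator locks.
    std::barrier checkpoint{2};   // "Every thread ran the checkpoint" rendezvous.

    void Waiter() {  // Like the thread issuing the checkpoint.
      std::shared_lock<std::shared_mutex> lk(heap_lock);  // Held across the wait: bad.
      checkpoint.arrive_and_wait();  // Blocks until Mutator() arrives below.
    }

    void Mutator() {  // Like a mutator that must run the checkpoint.
      std::unique_lock<std::shared_mutex> lk(heap_lock);  // Blocks behind Waiter().
      checkpoint.arrive_and_wait();  // Never reached: deadlock.
    }

    int main() {
      std::thread a(Waiter), b(Mutator);
      a.join();  // Never returns; the fix is to not hold locks across the wait.
      b.join();
    }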

NewTimingLogger:
The existing timing logger records when a split ends but not when it begins.
This isn't good for systrace: during races between mutators and the GC, it is
hard to tell which phase the GC is currently in. We only know which phase just
finished and have to derive the current one from that, which is not ideal.
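
A rough sketch of the idea (the names are illustrative, not the actual
NewTimingLogger API): record an explicit begin for each split so an observer
always knows the current phase:

    #include <stdint.h>
    #include <time.h>

    #include <string>
    #include <utility>
    #include <vector>

    class SplitTimingLogger {
     public:
      SplitTimingLogger() : current_start_ns_(0) {}

      // Recording the begin time lets systrace show a phase while it runs.
      void StartSplit(const std::string& name) {
        current_name_ = name;
        current_start_ns_ = NanoTime();
      }

      // Closes the current split and logs its duration.
      void EndSplit() {
        splits_.push_back(std::make_pair(current_name_, NanoTime() - current_start_ns_));
      }

     private:
      static uint64_t NanoTime() {
        timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return static_cast<uint64_t>(ts.tv_sec) * 1000000000ULL + ts.tv_nsec;
      }

      std::string current_name_;
      uint64_t current_start_ns_;
      std::vector<std::pair<std::string, uint64_t> > splits_;  // (name, duration in ns).
    };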

Support for only one discontinuous space:
The code now special-cases continuous spaces and the large object space,
rather than assuming we can have a collection of both.

Sorted atomic stacks:
These are used to improve verification performance. Simplify their use and add
extra checks; see the usage sketch below.
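
For example, verification code can sort a stack once and then answer repeated
membership queries by binary search. The helper below is hypothetical, written
against the atomic_stack.h added in this change:

    #include <stddef.h>

    #include "gc/accounting/atomic_stack.h"

    namespace art {

    // Hypothetical helper: returns true if every object in objs[0..count) is
    // on the stack. Sort() costs O(n log n) once; each Contains() is then an
    // O(log n) binary search instead of a linear scan.
    bool StackContainsAll(gc::accounting::ObjectStack* stack,
                          mirror::Object* const* objs, size_t count) {
      stack->Sort();
      for (size_t i = 0; i < count; ++i) {
        if (!stack->Contains(objs[i])) {
          return false;
        }
      }
      return true;
    }

    }  // namespace art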

Simplify mod-union table abstractions.

Reduce use of std::strings and their associated overhead in hot code.

Make time units of fields explicit.
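
For instance (the field names here are hypothetical, not the actual fields
touched by this change), encoding the unit in the name removes any ambiguity
about the scale at call sites:

    #include <stdint.h>

    struct GcStats {
      // uint64_t total_wait_time;  // Before: ns? us? ms? The reader must guess.
      uint64_t total_wait_time_ns;  // After: the unit is explicit in the name.
    };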

Reduce the confusion caused by IsAllocSpace really meaning IsDlMallocSpace.

Make GetTotalMemory (exposed via System) equal to the current footprint (as in
Dalvik) rather than the maximum memory footprint.

Change-Id: Ie87067140fa4499b15edab691fe6565d79599812
diff --git a/src/gc/accounting/atomic_stack.h b/src/gc/accounting/atomic_stack.h
new file mode 100644
index 0000000..4e1c253
--- /dev/null
+++ b/src/gc/accounting/atomic_stack.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_ACCOUNTING_ATOMIC_STACK_H_
+#define ART_SRC_GC_ACCOUNTING_ATOMIC_STACK_H_
+
+#include <sys/mman.h>
+
+#include <algorithm>
+#include <string>
+
+#include "atomic_integer.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "UniquePtr.h"
+#include "mem_map.h"
+#include "utils.h"
+
+namespace art {
+
+namespace mirror {
+class Object;
+}  // namespace mirror
+
+namespace gc {
+namespace accounting {
+
+template <typename T>
+class AtomicStack {
+ public:
+  // Capacity is how many elements we can store in the stack.
+  static AtomicStack* Create(const std::string& name, size_t capacity) {
+    UniquePtr<AtomicStack> mark_stack(new AtomicStack(name, capacity));
+    mark_stack->Init();
+    return mark_stack.release();
+  }
+
+  ~AtomicStack() {}
+
+  void Reset() {
+    DCHECK(mem_map_.get() != NULL);
+    DCHECK(begin_ != NULL);
+    front_index_ = 0;
+    back_index_ = 0;
+    is_sorted_ = true;
+    int result = madvise(begin_, sizeof(T) * capacity_, MADV_DONTNEED);
+    if (result == -1) {
+      PLOG(WARNING) << "madvise failed";
+    }
+  }
+
+  // Beware: Mixing atomic pushes and atomic pops will cause an ABA problem.
+
+  // Returns false if we overflowed the stack.
+  bool AtomicPushBack(const T& value) {
+    int32_t index;
+    is_sorted_ = false;
+    do {
+      index = back_index_;
+      if (UNLIKELY(static_cast<size_t>(index) >= capacity_)) {
+        // Stack overflow.
+        return false;
+      }
+    } while (!back_index_.CompareAndSwap(index, index + 1));
+    begin_[index] = value;
+    return true;
+  }
+
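+  // Non-atomic push: only safe when no other thread can push or pop concurrently.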
+  void PushBack(const T& value) {
+    is_sorted_ = false;
+    int32_t index = back_index_;
+    DCHECK_LT(static_cast<size_t>(index), capacity_);
+    back_index_ = index + 1;
+    begin_[index] = value;
+  }
+
+  T PopBack() {
+    DCHECK_GT(back_index_, front_index_);
+    // Decrement the back index non-atomically.
+    back_index_ = back_index_ - 1;
+    return begin_[back_index_];
+  }
+
+  // Take an item from the front of the stack.
+  T PopFront() {
+    int32_t index = front_index_;
+    DCHECK_LT(index, back_index_.get());
+    front_index_ = front_index_ + 1;
+    return begin_[index];
+  }
+
+  bool IsEmpty() const {
+    return Size() == 0;
+  }
+
+  size_t Size() const {
+    DCHECK_LE(front_index_, back_index_);
+    return back_index_ - front_index_;
+  }
+
+  // Begin() and End() expose the live range [front_index_, back_index_) so
+  // that standard algorithms can be used in Sort() and Contains().
+  T* Begin() const {
+    return begin_ + front_index_;
+  }
+
+  T* End() const {
+    return begin_ + back_index_;
+  }
+
+  size_t Capacity() const {
+    return capacity_;
+  }
+
+  // Will clear the stack.
+  void Resize(size_t new_capacity) {
+    capacity_ = new_capacity;
+    Init();
+  }
+
+  void Sort() {
+    if (!is_sorted_) {
+      int32_t start_back_index = back_index_.get();
+      int32_t start_front_index = front_index_.get();
+      is_sorted_ = true;
+      std::sort(Begin(), End());
+      CHECK_EQ(start_back_index, back_index_.get());
+      CHECK_EQ(start_front_index, front_index_.get());
+    }
+  }
+
+  bool Contains(const T& value) const {
+    if (is_sorted_) {
+      return std::binary_search(Begin(), End(), value);
+    } else {
+      return std::find(Begin(), End(), value) != End();
+    }
+  }
+
+ private:
+  AtomicStack(const std::string& name, const size_t capacity)
+      : name_(name),
+        back_index_(0),
+        front_index_(0),
+        begin_(NULL),
+        capacity_(capacity),
+        is_sorted_(true) {
+  }
+
+  // Allocates the backing memory map (capacity_ elements) and resets the stack.
+  void Init() {
+    mem_map_.reset(MemMap::MapAnonymous(name_.c_str(), NULL, capacity_ * sizeof(T),
+                                        PROT_READ | PROT_WRITE));
+    CHECK(mem_map_.get() != NULL) << "couldn't allocate mark stack";
+    byte* addr = mem_map_->Begin();
+    CHECK(addr != NULL);
+    begin_ = reinterpret_cast<T*>(addr);
+    Reset();
+  }
+
+  // Name of the mark stack.
+  std::string name_;
+
+  // Memory mapping of the atomic stack.
+  UniquePtr<MemMap> mem_map_;
+
+  // Back index (index after the last element pushed).
+  AtomicInteger back_index_;
+
+  // Front index, used for implementing PopFront.
+  AtomicInteger front_index_;
+
+  // Base of the atomic stack.
+  T* begin_;
+
+  // Maximum number of elements.
+  size_t capacity_;
+
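+  // Whether the stack is currently sorted; lets Contains() use binary search.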
+  bool is_sorted_;
+
+  DISALLOW_COPY_AND_ASSIGN(AtomicStack);
+};
+
+typedef AtomicStack<mirror::Object*> ObjectStack;
+
+}  // namespace accounting
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_SRC_GC_ACCOUNTING_ATOMIC_STACK_H_