Make ART compile with GCC -O0 again.

Tidy up InstructionSetFeatures so that it has a type hierarchy dependent on
architecture.
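
For illustration, a minimal sketch of what such an architecture-dependent
hierarchy can look like; the class and member names below are simplified
stand-ins, not necessarily the ones introduced by this change:

    #include <string>

    // Simplified stand-ins; the real enum and class names may differ.
    enum class InstructionSet { kArm, kX86 };

    class InstructionSetFeatures {
     public:
      virtual ~InstructionSetFeatures() {}
      virtual InstructionSet GetInstructionSet() const = 0;
      virtual bool Equals(const InstructionSetFeatures* other) const = 0;
      virtual std::string GetFeatureString() const = 0;
    };

    // Architecture-specific subclass carrying ARM-only feature bits.
    class ArmInstructionSetFeatures : public InstructionSetFeatures {
     public:
      explicit ArmInstructionSetFeatures(bool has_div) : has_div_(has_div) {}
      InstructionSet GetInstructionSet() const override { return InstructionSet::kArm; }
      bool Equals(const InstructionSetFeatures* other) const override {
        if (other->GetInstructionSet() != InstructionSet::kArm) {
          return false;
        }
        return static_cast<const ArmInstructionSetFeatures*>(other)->has_div_ == has_div_;
      }
      std::string GetFeatureString() const override { return has_div_ ? "div" : "-div"; }
      bool HasDivideInstruction() const { return has_div_; }  // ARM-only query.
     private:
      const bool has_div_;
    };
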
Extend instruction_set_test to warn when InstructionSetFeatures don't agree
with those derived from system properties, AT_HWCAP and /proc/cpuinfo.
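
The cross-check amounts to comparing the build-time features against the ones
parsed from each runtime source and logging a warning on mismatch. A hedged
gtest-style sketch, treating the factory names (FromCppDefines, FromCpuInfo)
and include paths as assumptions for illustration:

    #include <memory>

    #include "gtest/gtest.h"

    // Assumed ART headers/APIs: LOG(WARNING) from base/logging.h and the
    // InstructionSetFeatures factories; exact names are illustrative.
    #include "arch/instruction_set_features.h"
    #include "base/logging.h"

    TEST(InstructionSetFeaturesTest, CppDefinesAgreeWithCpuInfo) {
      // Features implied by the compiler's preprocessor defines (build time).
      std::unique_ptr<const InstructionSetFeatures> from_defines(
          InstructionSetFeatures::FromCppDefines());
      // Features parsed from /proc/cpuinfo (run time); AT_HWCAP and the
      // system-property source would be cross-checked the same way.
      std::unique_ptr<const InstructionSetFeatures> from_cpuinfo(
          InstructionSetFeatures::FromCpuInfo());
      if (!from_defines->Equals(from_cpuinfo.get())) {
        LOG(WARNING) << "Build-time features \"" << from_defines->GetFeatureString()
                     << "\" disagree with /proc/cpuinfo features \""
                     << from_cpuinfo->GetFeatureString() << "\"";
      }
    }
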
Clean up class linker entry point logic so that it no longer returns entry
points but instead tests whether the passed code is the particular entrypoint.
This works around image trampolines that replicate entrypoints.
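
A small self-contained sketch of this "test, don't return" pattern; the names
are hypothetical, but the point is that the predicate can accept both the
runtime's trampoline and the duplicate baked into the boot image:

    // Hypothetical stand-in for the class linker; only the entrypoint test is shown.
    class LinkerSketch {
     public:
      LinkerSketch(const void* runtime_stub, const void* image_stub)
          : runtime_stub_(runtime_stub), image_stub_(image_stub) {}

      // Instead of exposing a single GetQuickResolutionStub() pointer for callers
      // to compare against, answer the question directly so the replicated image
      // trampoline also matches.
      bool IsQuickResolutionStub(const void* code) const {
        return code == runtime_stub_ || code == image_stub_;
      }

     private:
      const void* const runtime_stub_;  // Trampoline owned by the runtime.
      const void* const image_stub_;    // Replicated trampoline in the boot image.
    };
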
Bug: 17993736

Change-Id: I5f4b49e88c3b02a79f9bee04f83395146ed7be23
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index dbf4abc..4ef8478 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -24,7 +24,6 @@
 #include "atomic.h"
 #include "base/timing_logger.h"
 #include "globals.h"
-#include "gtest/gtest.h"
 #include "jni.h"
 #include "object_callbacks.h"
 #include "offsets.h"
@@ -45,44 +44,56 @@
 class ReferenceQueue {
  public:
   explicit ReferenceQueue(Mutex* lock);
+
   // Enqueue a reference if it is not already enqueued. Thread safe to call from multiple threads
   // since it uses a lock to avoid a race between checking for the reference's presence and adding
   // it.
   void AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+
   // Enqueue a reference; unlike EnqueuePendingReference, this checks that the
   // reference IsEnqueueable. Not thread safe; used when mutators are paused to minimize lock
   // overhead.
   void EnqueueReference(mirror::Reference* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   void EnqueuePendingReference(mirror::Reference* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   mirror::Reference* DequeuePendingReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   // Enqueues finalizer references with white referents.  White referents are blackened, moved to the
   // zombie field, and the referent field is cleared.
   void EnqueueFinalizerReferences(ReferenceQueue* cleared_references,
                                   IsHeapReferenceMarkedCallback* is_marked_callback,
                                   MarkObjectCallback* mark_object_callback, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   // Walks the reference list marking any references subject to the reference clearing policy.
   // References with a black referent are removed from the list.  References with white referents
   // biased toward saving are blackened and also removed from the list.
   void ForwardSoftReferences(IsHeapReferenceMarkedCallback* preserve_callback, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   // Unlink the reference list, clearing reference objects with white referents.  Cleared references
   // registered to a reference queue are scheduled for appending by the heap worker thread.
   void ClearWhiteReferences(ReferenceQueue* cleared_references,
                             IsHeapReferenceMarkedCallback* is_marked_callback, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   void Dump(std::ostream& os) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   bool IsEmpty() const {
     return list_ == nullptr;
   }
+
   void Clear() {
     list_ = nullptr;
   }
+
   mirror::Reference* GetList() {
     return list_;
   }
+
   // Visits list_, currently only used for the mark compact GC.
   void UpdateRoots(IsMarkedCallback* callback, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -90,10 +101,13 @@
  private:
   // Lock, used for parallel GC reference enqueuing. It allows for multiple threads simultaneously
   // calling AtomicEnqueueIfNotEnqueued.
-  Mutex* lock_;
+  Mutex* const lock_;
+
   // The actual reference list. Only a root for the mark compact GC since it will be null for other
   // GC types.
   mirror::Reference* list_;
+
+  DISALLOW_COPY_AND_ASSIGN(ReferenceQueue);
 };
 
 }  // namespace gc