Add Handle/HandleScope and delete SirtRef.

Deleted SirtRef and replaced it with Handle. Handles are value types
that wrap a StackReference*.
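
For example (a sketch adapted from the heap_test.cc change below; "soa"
and "class_linker_" come from the test fixture, and the mutator lock is
assumed to be held):

    StackHandleScope<2> hs(soa.Self());
    Handle<mirror::Class> c(hs.NewHandle(
        class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));
    Handle<mirror::ObjectArray<mirror::Object> > array(hs.NewHandle(
        mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), c.Get(), 2048)));
    // Get() reads back through the stack reference, so it returns the
    // current address even if the GC moved the object.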

Renamed StackIndirectReferenceTable to HandleScope.

Added a scoped handle wrapper (HandleWrapper) that wraps an Object**
and restores it in its destructor.
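
Call sites that previously did SirtRef plus an explicit "*obj =
ref.get()" now rely on the wrapper's destructor for the write-back.
A sketch adapted from the allocation slow path in heap-inl.h below
(the GC may move *obj during the collection):

    StackHandleScope<1> hs(self);
    HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
    CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
    // ~HandleWrapper restores *obj when the scope exits.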

Renamed Handle::get -> Get.

Bug: 8473721

Change-Id: Idbfebd4f35af629f0f43931b7c5184b334822c7a
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index d05f45b..02dd4d9 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -142,7 +142,7 @@
   virtual void RevokeAllThreadLocalBuffers() = 0;
 
   // Record that you have freed some objects or large objects, calls Heap::RecordFree.
-  // TODO: These are not thread safe, add a lock if we get have parallel sweeping.
+  // TODO: These are not thread safe, add a lock if we get parallel sweeping.
   void RecordFree(uint64_t freed_objects, int64_t freed_bytes);
   void RecordFreeLargeObjects(uint64_t freed_objects, int64_t freed_bytes);
 
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 5de7026..cc258f5 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -106,7 +106,7 @@
 
 void MarkSweep::InitializePhase() {
   TimingLogger::ScopedSplit split("InitializePhase", &timings_);
-  mark_stack_ = heap_->mark_stack_.get();
+  mark_stack_ = heap_->GetMarkStack();
   DCHECK(mark_stack_ != nullptr);
   immune_region_.Reset();
   class_count_ = 0;
@@ -123,7 +123,7 @@
   mark_fastpath_count_ = 0;
   mark_slowpath_count_ = 0;
   {
-    // TODO: I don't think we should need heap bitmap lock to get the mark bitmap.
+    // TODO: I don't think we should need the heap bitmap lock to get the mark bitmap.
     ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
     mark_bitmap_ = heap_->GetMarkBitmap();
   }
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 3ebc0af..cfb0b5e 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -264,7 +264,7 @@
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Used to get around thread safety annotations. The call is from MarkingPhase and is guarded by
+  // Used to work around thread safety annotations. The call is from MarkingPhase and is guarded by
   // IsExclusiveHeld.
   void RevokeAllThreadLocalAllocationStacks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
 
diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h
index 55140f6..47682cc 100644
--- a/runtime/gc/collector/semi_space-inl.h
+++ b/runtime/gc/collector/semi_space-inl.h
@@ -50,7 +50,7 @@
   return reinterpret_cast<mirror::Object*>(lock_word.ForwardingAddress());
 }
 
-// Used to mark and copy objects. Any newly-marked objects who are in the from space get moved to
+// Used to mark and copy objects. Any newly-marked objects that are in the from space get moved to
 // the to-space and have their forward address updated. Objects which have been newly marked are
 // pushed on the mark stack.
 template<bool kPoisonReferences>
@@ -72,7 +72,7 @@
         forward_address = MarkNonForwardedObject(obj);
         DCHECK(forward_address != nullptr);
         // Make sure to only update the forwarding address AFTER you copy the object so that the
-        // monitor word doesn't get stomped over.
+        // monitor word doesn't get stomped on.
         obj->SetLockWord(
             LockWord::FromForwardingAddress(reinterpret_cast<size_t>(forward_address)), false);
         // Push the object onto the mark stack for later processing.
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index a406f6d..95a2c96 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -142,7 +142,7 @@
 
 void SemiSpace::InitializePhase() {
   TimingLogger::ScopedSplit split("InitializePhase", &timings_);
-  mark_stack_ = heap_->mark_stack_.get();
+  mark_stack_ = heap_->GetMarkStack();
   DCHECK(mark_stack_ != nullptr);
   immune_region_.Reset();
   is_large_object_space_immune_ = false;
@@ -154,7 +154,7 @@
   // Set the initial bitmap.
   to_space_live_bitmap_ = to_space_->GetLiveBitmap();
   {
-    // TODO: I don't think we should need heap bitmap lock to get the mark bitmap.
+    // TODO: I don't think we should need the heap bitmap lock to get the mark bitmap.
     ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
     mark_bitmap_ = heap_->GetMarkBitmap();
   }
@@ -172,7 +172,7 @@
   CHECK(Locks::mutator_lock_->IsExclusiveHeld(self_));
   if (kStoreStackTraces) {
     Locks::mutator_lock_->AssertExclusiveHeld(self_);
-    // Store the stack traces into the runtime fault string in case we get a heap corruption
+    // Store the stack traces into the runtime fault string in case we hit a heap corruption
     // related crash later.
     ThreadState old_state = self_->SetStateUnsafe(kRunnable);
     std::ostringstream oss;
@@ -231,7 +231,7 @@
   BindBitmaps();
   // Process dirty cards and add dirty cards to mod-union tables.
   heap_->ProcessCards(timings_, kUseRememberedSet && generational_);
-  // Clear the whole card table since we can not get any additional dirty cards during the
+  // Clear the whole card table since we cannot get any additional dirty cards during the
   // paused GC. This saves memory but only works for pause the world collectors.
   timings_.NewSplit("ClearCardTable");
   heap_->GetCardTable()->ClearCardTable();
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 9fdf471..4b1ecc4 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -242,7 +242,7 @@
   // heap. When false, collect only the bump pointer spaces.
   bool whole_heap_collection_;
 
-  // How many objects and bytes we moved, used so that we don't need to get the size of the
+  // How many objects and bytes we moved; used so that we don't need to get the size of the
   // to_space_ when calculating how many objects and bytes we freed.
   size_t bytes_moved_;
   size_t objects_moved_;
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index ce51ac5..5a58446 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -49,7 +49,7 @@
 
 void StickyMarkSweep::MarkReachableObjects() {
   // All reachable objects must be referenced by a root or a dirty card, so we can clear the mark
-  // stack here since all objects in the mark stack will get scanned by the card scanning anyways.
+  // stack here since all objects in the mark stack will get scanned by the card scanning anyway.
   // TODO: Not put these objects in the mark stack in the first place.
   mark_stack_->Reset();
   RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index a06f272..7cee5a0 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -27,7 +27,7 @@
 #include "gc/space/large_object_space.h"
 #include "gc/space/rosalloc_space-inl.h"
 #include "runtime.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
 #include "thread.h"
 #include "thread-inl.h"
 #include "verify_object-inl.h"
@@ -144,10 +144,10 @@
       mirror::Object** end_address;
       while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize,
                                                 &start_address, &end_address)) {
-        // Disable verify object in SirtRef as obj isn't on the alloc stack yet.
-        SirtRefNoVerify<mirror::Object> ref(self, *obj);
+        // TODO: Add handle VerifyObject.
+        StackHandleScope<1> hs(self);
+        HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
         CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
-        *obj = ref.get();
       }
       self->SetThreadLocalAllocationStack(start_address, end_address);
       // Retry on the new thread-local allocation stack.
@@ -159,10 +159,10 @@
     // This is safe to do since the GC will never free objects which are neither in the allocation
     // stack or the live bitmap.
     while (!allocation_stack_->AtomicPushBack(*obj)) {
-      // Disable verify object in SirtRef as obj isn't on the alloc stack yet.
-      SirtRefNoVerify<mirror::Object> ref(self, *obj);
+      // TODO: Add handle VerifyObject.
+      StackHandleScope<1> hs(self);
+      HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
       CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
-      *obj = ref.get();
     }
   }
 }
@@ -300,11 +300,7 @@
 inline void Heap::CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
                                     mirror::Object** obj) {
   if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
-    // The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint.
-    SirtRef<mirror::Object> ref(self, *obj);
-    RequestConcurrentGC(self);
-    // Restore obj in case it moved.
-    *obj = ref.get();
+    RequestConcurrentGCAndSaveObject(self, obj);
   }
 }
 
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 7235729..4642a98 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -62,7 +62,7 @@
 #include "runtime.h"
 #include "ScopedLocalRef.h"
 #include "scoped_thread_state_change.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
 #include "thread_list.h"
 #include "UniquePtr.h"
 #include "well_known_classes.h"
@@ -1070,10 +1070,11 @@
                                              size_t alloc_size, size_t* bytes_allocated,
                                              size_t* usable_size,
                                              mirror::Class** klass) {
-  mirror::Object* ptr = nullptr;
   bool was_default_allocator = allocator == GetCurrentAllocator();
   DCHECK(klass != nullptr);
-  SirtRef<mirror::Class> sirt_klass(self, *klass);
+  StackHandleScope<1> hs(self);
+  HandleWrapper<mirror::Class> h(hs.NewHandleWrapper(klass));
+  klass = nullptr;  // Invalidate for safety.
   // The allocation failed. If the GC is running, block until it completes, and then retry the
   // allocation.
   collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
@@ -1081,31 +1082,32 @@
     // If we were the default allocator but the allocator changed while we were suspended,
     // abort the allocation.
     if (was_default_allocator && allocator != GetCurrentAllocator()) {
-      *klass = sirt_klass.get();
       return nullptr;
     }
     // A GC was in progress and we blocked, retry allocation now that memory has been freed.
-    ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, usable_size);
+    mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
+                                                     usable_size);
+    if (ptr != nullptr) {
+      return ptr;
+    }
   }
 
   collector::GcType tried_type = next_gc_type_;
-  if (ptr == nullptr) {
-    const bool gc_ran =
-        CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
-    if (was_default_allocator && allocator != GetCurrentAllocator()) {
-      *klass = sirt_klass.get();
-      return nullptr;
-    }
-    if (gc_ran) {
-      ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, usable_size);
+  const bool gc_ran =
+      CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
+  if (was_default_allocator && allocator != GetCurrentAllocator()) {
+    return nullptr;
+  }
+  if (gc_ran) {
+    mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
+                                                     usable_size);
+    if (ptr != nullptr) {
+      return ptr;
     }
   }
 
   // Loop through our different Gc types and try to Gc until we get enough free memory.
   for (collector::GcType gc_type : gc_plan_) {
-    if (ptr != nullptr) {
-      break;
-    }
     if (gc_type == tried_type) {
       continue;
     }
@@ -1113,40 +1115,41 @@
     const bool gc_ran =
         CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
     if (was_default_allocator && allocator != GetCurrentAllocator()) {
-      *klass = sirt_klass.get();
       return nullptr;
     }
     if (gc_ran) {
       // Did we free sufficient memory for the allocation to succeed?
-      ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, usable_size);
+      mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
+                                                       usable_size);
+      if (ptr != nullptr) {
+        return ptr;
+      }
     }
   }
   // Allocations have failed after GCs;  this is an exceptional state.
-  if (ptr == nullptr) {
-    // Try harder, growing the heap if necessary.
-    ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
+  // Try harder, growing the heap if necessary.
+  mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
+                                                  usable_size);
+  if (ptr != nullptr) {
+    return ptr;
   }
-  if (ptr == nullptr) {
-    // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
-    // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
-    // VM spec requires that all SoftReferences have been collected and cleared before throwing
-    // OOME.
-    VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
-             << " allocation";
-    // TODO: Run finalization, but this may cause more allocations to occur.
-    // We don't need a WaitForGcToComplete here either.
-    DCHECK(!gc_plan_.empty());
-    CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
-    if (was_default_allocator && allocator != GetCurrentAllocator()) {
-      *klass = sirt_klass.get();
-      return nullptr;
-    }
-    ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
-    if (ptr == nullptr) {
-      ThrowOutOfMemoryError(self, alloc_size, false);
-    }
+  // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
+  // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
+  // VM spec requires that all SoftReferences have been collected and cleared before throwing
+  // OOME.
+  VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
+           << " allocation";
+  // TODO: Run finalization, but this may cause more allocations to occur.
+  // We don't need a WaitForGcToComplete here either.
+  DCHECK(!gc_plan_.empty());
+  CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
+  if (was_default_allocator && allocator != GetCurrentAllocator()) {
+    return nullptr;
   }
-  *klass = sirt_klass.get();
+  ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
+  if (ptr == nullptr) {
+    ThrowOutOfMemoryError(self, alloc_size, false);
+  }
   return ptr;
 }
 
@@ -2536,6 +2539,12 @@
   *object = soa.Decode<mirror::Object*>(arg.get());
 }
 
+void Heap::RequestConcurrentGCAndSaveObject(Thread* self, mirror::Object** obj) {
+  StackHandleScope<1> hs(self);
+  HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
+  RequestConcurrentGC(self);
+}
+
 void Heap::RequestConcurrentGC(Thread* self) {
   // Make sure that we can do a concurrent GC.
   Runtime* runtime = Runtime::Current();
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index f71de1a..3b071d1 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -412,7 +412,7 @@
     return GetTotalMemory() - num_bytes_allocated_;
   }
 
-  // Get the space that corresponds to an object's address. Current implementation searches all
+  // Get the space that corresponds to an object's address. The current implementation searches all
   // spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
   // TODO: consider using faster data structure like binary tree.
   space::ContinuousSpace* FindContinuousSpaceFromObject(const mirror::Object*, bool fail_ok) const;
@@ -582,6 +582,10 @@
                                        mirror::Object** obj)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  accounting::ObjectStack* GetMarkStack() {
+    return mark_stack_.get();
+  }
+
   // We don't force this to be inlined since it is a slow path.
   template <bool kInstrumented, typename PreFenceVisitor>
   mirror::Object* AllocLargeObject(Thread* self, mirror::Class* klass, size_t byte_count,
@@ -634,7 +638,10 @@
   void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
       LOCKS_EXCLUDED(heap_trim_request_lock_);
   void RequestHeapTrim() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
-  void RequestConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
+  void RequestConcurrentGCAndSaveObject(Thread* self, mirror::Object** obj)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void RequestConcurrentGC(Thread* self)
+      LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
   bool IsGCRequestPending() const;
 
   // Sometimes CollectGarbageInternal decides to run a different Gc than you requested. Returns
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index a85ad4d..8850b92 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -20,7 +20,7 @@
 #include "mirror/class-inl.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
 
 namespace art {
 namespace gc {
@@ -43,14 +43,16 @@
     ScopedObjectAccess soa(Thread::Current());
     // garbage is created during ClassLinker::Init
 
-    SirtRef<mirror::Class> c(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
-                                                                        "[Ljava/lang/Object;"));
+    StackHandleScope<1> hs(soa.Self());
+    Handle<mirror::Class> c(
+        hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));
     for (size_t i = 0; i < 1024; ++i) {
-      SirtRef<mirror::ObjectArray<mirror::Object> > array(soa.Self(),
-          mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), c.get(), 2048));
+      StackHandleScope<1> hs(soa.Self());
+      Handle<mirror::ObjectArray<mirror::Object> > array(hs.NewHandle(
+          mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), c.Get(), 2048)));
       for (size_t j = 0; j < 2048; ++j) {
         mirror::String* string = mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!");
-        // SIRT operator -> deferences the SIRT before running the method.
+        // Handle::operator-> dereferences the handle before calling the method.
         array->Set<false>(j, string);
       }
     }
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 7493c19..ba46dcc 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -24,7 +24,7 @@
 #include "mirror/class-inl.h"
 #include "mirror/object-inl.h"
 #include "runtime.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
 #include "thread.h"
 #include "thread_list.h"
 #include "utils.h"
diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h
index 3335e72..ce101e4 100644
--- a/runtime/gc/space/space_test.h
+++ b/runtime/gc/space/space_test.h
@@ -48,7 +48,8 @@
   }
 
   mirror::Class* GetByteArrayClass(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    SirtRef<mirror::ClassLoader> null_loader(self, nullptr);
+    StackHandleScope<1> hs(self);
+    auto null_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
     if (byte_array_class_ == nullptr) {
       mirror::Class* byte_array_class =
           Runtime::Current()->GetClassLinker()->FindClass(self, "[B", null_loader);
@@ -62,10 +63,11 @@
   mirror::Object* Alloc(space::MallocSpace* alloc_space, Thread* self, size_t bytes,
                         size_t* bytes_allocated, size_t* usable_size)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    SirtRef<mirror::Class> byte_array_class(self, GetByteArrayClass(self));
+    StackHandleScope<1> hs(self);
+    Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
     mirror::Object* obj = alloc_space->Alloc(self, bytes, bytes_allocated, usable_size);
     if (obj != nullptr) {
-      InstallClass(obj, byte_array_class.get(), bytes);
+      InstallClass(obj, byte_array_class.Get(), bytes);
     }
     return obj;
   }
@@ -73,10 +75,11 @@
   mirror::Object* AllocWithGrowth(space::MallocSpace* alloc_space, Thread* self, size_t bytes,
                                   size_t* bytes_allocated, size_t* usable_size)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    SirtRef<mirror::Class> byte_array_class(self, GetByteArrayClass(self));
+    StackHandleScope<1> hs(self);
+    Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
     mirror::Object* obj = alloc_space->AllocWithGrowth(self, bytes, bytes_allocated, usable_size);
     if (obj != nullptr) {
-      InstallClass(obj, byte_array_class.get(), bytes);
+      InstallClass(obj, byte_array_class.Get(), bytes);
     }
     return obj;
   }
@@ -177,9 +180,10 @@
 
   // Succeeds, fits without adjusting the footprint limit.
   size_t ptr1_bytes_allocated, ptr1_usable_size;
-  SirtRef<mirror::Object> ptr1(self, Alloc(space, self, 1 * MB, &ptr1_bytes_allocated,
-                                           &ptr1_usable_size));
-  EXPECT_TRUE(ptr1.get() != nullptr);
+  StackHandleScope<3> hs(soa.Self());
+  Handle<mirror::Object> ptr1(
+      hs.NewHandle(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size)));
+  EXPECT_TRUE(ptr1.Get() != nullptr);
   EXPECT_LE(1U * MB, ptr1_bytes_allocated);
   EXPECT_LE(1U * MB, ptr1_usable_size);
   EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);
@@ -190,9 +194,9 @@
 
   // Succeeds, adjusts the footprint.
   size_t ptr3_bytes_allocated, ptr3_usable_size;
-  SirtRef<mirror::Object> ptr3(self, AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated,
-                                                     &ptr3_usable_size));
-  EXPECT_TRUE(ptr3.get() != nullptr);
+  Handle<mirror::Object> ptr3(
+      hs.NewHandle(AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated, &ptr3_usable_size)));
+  EXPECT_TRUE(ptr3.Get() != nullptr);
   EXPECT_LE(8U * MB, ptr3_bytes_allocated);
   EXPECT_LE(8U * MB, ptr3_usable_size);
   EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
@@ -206,23 +210,23 @@
   EXPECT_TRUE(ptr5 == nullptr);
 
   // Release some memory.
-  size_t free3 = space->AllocationSize(ptr3.get(), nullptr);
+  size_t free3 = space->AllocationSize(ptr3.Get(), nullptr);
   EXPECT_EQ(free3, ptr3_bytes_allocated);
-  EXPECT_EQ(free3, space->Free(self, ptr3.reset(nullptr)));
+  EXPECT_EQ(free3, space->Free(self, ptr3.Assign(nullptr)));
   EXPECT_LE(8U * MB, free3);
 
   // Succeeds, now that memory has been freed.
   size_t ptr6_bytes_allocated, ptr6_usable_size;
-  SirtRef<mirror::Object> ptr6(self, AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated,
-                                                     &ptr6_usable_size));
-  EXPECT_TRUE(ptr6.get() != nullptr);
+  Handle<mirror::Object> ptr6(
+      hs.NewHandle(AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated, &ptr6_usable_size)));
+  EXPECT_TRUE(ptr6.Get() != nullptr);
   EXPECT_LE(9U * MB, ptr6_bytes_allocated);
   EXPECT_LE(9U * MB, ptr6_usable_size);
   EXPECT_LE(ptr6_usable_size, ptr6_bytes_allocated);
 
   // Final clean up.
-  size_t free1 = space->AllocationSize(ptr1.get(), nullptr);
-  space->Free(self, ptr1.reset(nullptr));
+  size_t free1 = space->AllocationSize(ptr1.Get(), nullptr);
+  space->Free(self, ptr1.Assign(nullptr));
   EXPECT_LE(1U * MB, free1);
 
   // Make sure that the zygote space isn't directly at the start of the space.
@@ -243,8 +247,8 @@
   AddSpace(space, false);
 
   // Succeeds, fits without adjusting the footprint limit.
-  ptr1.reset(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size));
-  EXPECT_TRUE(ptr1.get() != nullptr);
+  ptr1.Assign(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size));
+  EXPECT_TRUE(ptr1.Get() != nullptr);
   EXPECT_LE(1U * MB, ptr1_bytes_allocated);
   EXPECT_LE(1U * MB, ptr1_usable_size);
   EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);
@@ -254,16 +258,16 @@
   EXPECT_TRUE(ptr2 == nullptr);
 
   // Succeeds, adjusts the footprint.
-  ptr3.reset(AllocWithGrowth(space, self, 2 * MB, &ptr3_bytes_allocated, &ptr3_usable_size));
-  EXPECT_TRUE(ptr3.get() != nullptr);
+  ptr3.Assign(AllocWithGrowth(space, self, 2 * MB, &ptr3_bytes_allocated, &ptr3_usable_size));
+  EXPECT_TRUE(ptr3.Get() != nullptr);
   EXPECT_LE(2U * MB, ptr3_bytes_allocated);
   EXPECT_LE(2U * MB, ptr3_usable_size);
   EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
-  space->Free(self, ptr3.reset(nullptr));
+  space->Free(self, ptr3.Assign(nullptr));
 
   // Final clean up.
-  free1 = space->AllocationSize(ptr1.get(), nullptr);
-  space->Free(self, ptr1.reset(nullptr));
+  free1 = space->AllocationSize(ptr1.Get(), nullptr);
+  space->Free(self, ptr1.Assign(nullptr));
   EXPECT_LE(1U * MB, free1);
 }
 
@@ -279,9 +283,10 @@
 
   // Succeeds, fits without adjusting the footprint limit.
   size_t ptr1_bytes_allocated, ptr1_usable_size;
-  SirtRef<mirror::Object> ptr1(self, Alloc(space, self, 1 * MB, &ptr1_bytes_allocated,
-                                           &ptr1_usable_size));
-  EXPECT_TRUE(ptr1.get() != nullptr);
+  StackHandleScope<3> hs(soa.Self());
+  Handle<mirror::Object> ptr1(
+      hs.NewHandle(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size)));
+  EXPECT_TRUE(ptr1.Get() != nullptr);
   EXPECT_LE(1U * MB, ptr1_bytes_allocated);
   EXPECT_LE(1U * MB, ptr1_usable_size);
   EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);
@@ -292,9 +297,9 @@
 
   // Succeeds, adjusts the footprint.
   size_t ptr3_bytes_allocated, ptr3_usable_size;
-  SirtRef<mirror::Object> ptr3(self, AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated,
-                                                     &ptr3_usable_size));
-  EXPECT_TRUE(ptr3.get() != nullptr);
+  Handle<mirror::Object> ptr3(
+      hs.NewHandle(AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated, &ptr3_usable_size)));
+  EXPECT_TRUE(ptr3.Get() != nullptr);
   EXPECT_LE(8U * MB, ptr3_bytes_allocated);
   EXPECT_LE(8U * MB, ptr3_usable_size);
   EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
@@ -308,23 +313,23 @@
   EXPECT_TRUE(ptr5 == nullptr);
 
   // Release some memory.
-  size_t free3 = space->AllocationSize(ptr3.get(), nullptr);
+  size_t free3 = space->AllocationSize(ptr3.Get(), nullptr);
   EXPECT_EQ(free3, ptr3_bytes_allocated);
-  space->Free(self, ptr3.reset(nullptr));
+  space->Free(self, ptr3.Assign(nullptr));
   EXPECT_LE(8U * MB, free3);
 
   // Succeeds, now that memory has been freed.
   size_t ptr6_bytes_allocated, ptr6_usable_size;
-  SirtRef<mirror::Object> ptr6(self, AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated,
-                                                     &ptr6_usable_size));
-  EXPECT_TRUE(ptr6.get() != nullptr);
+  Handle<mirror::Object> ptr6(
+      hs.NewHandle(AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated, &ptr6_usable_size)));
+  EXPECT_TRUE(ptr6.Get() != nullptr);
   EXPECT_LE(9U * MB, ptr6_bytes_allocated);
   EXPECT_LE(9U * MB, ptr6_usable_size);
   EXPECT_LE(ptr6_usable_size, ptr6_bytes_allocated);
 
   // Final clean up.
-  size_t free1 = space->AllocationSize(ptr1.get(), nullptr);
-  space->Free(self, ptr1.reset(nullptr));
+  size_t free1 = space->AllocationSize(ptr1.Get(), nullptr);
+  space->Free(self, ptr1.Assign(nullptr));
   EXPECT_LE(1U * MB, free1);
 }
 
@@ -345,8 +350,6 @@
     lots_of_objects[i] = Alloc(space, self, size_of_zero_length_byte_array, &allocation_size,
                                &usable_size);
     EXPECT_TRUE(lots_of_objects[i] != nullptr);
-    SirtRef<mirror::Object> obj(self, lots_of_objects[i]);
-    lots_of_objects[i] = obj.get();
     size_t computed_usable_size;
     EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i], &computed_usable_size));
     EXPECT_EQ(usable_size, computed_usable_size);
@@ -360,8 +363,6 @@
     size_t allocation_size, usable_size;
     lots_of_objects[i] = AllocWithGrowth(space, self, 1024, &allocation_size, &usable_size);
     EXPECT_TRUE(lots_of_objects[i] != nullptr);
-    SirtRef<mirror::Object> obj(self, lots_of_objects[i]);
-    lots_of_objects[i] = obj.get();
     size_t computed_usable_size;
     EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i], &computed_usable_size));
     EXPECT_EQ(usable_size, computed_usable_size);
@@ -418,18 +419,19 @@
           alloc_size = size_of_zero_length_byte_array;
         }
       }
-      SirtRef<mirror::Object> object(self, nullptr);
+      StackHandleScope<1> hs(soa.Self());
+      auto object(hs.NewHandle<mirror::Object>(nullptr));
       size_t bytes_allocated = 0;
       if (round <= 1) {
-        object.reset(Alloc(space, self, alloc_size, &bytes_allocated, nullptr));
+        object.Assign(Alloc(space, self, alloc_size, &bytes_allocated, nullptr));
       } else {
-        object.reset(AllocWithGrowth(space, self, alloc_size, &bytes_allocated, nullptr));
+        object.Assign(AllocWithGrowth(space, self, alloc_size, &bytes_allocated, nullptr));
       }
       footprint = space->GetFootprint();
       EXPECT_GE(space->Size(), footprint);  // invariant
-      if (object.get() != nullptr) {  // allocation succeeded
-        lots_of_objects[i] = object.get();
-        size_t allocation_size = space->AllocationSize(object.get(), nullptr);
+      if (object.Get() != nullptr) {  // allocation succeeded
+        lots_of_objects[i] = object.Get();
+        size_t allocation_size = space->AllocationSize(object.Get(), nullptr);
         EXPECT_EQ(bytes_allocated, allocation_size);
         if (object_size > 0) {
           EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
@@ -509,16 +511,17 @@
   space->RevokeAllThreadLocalBuffers();
 
   // All memory was released, try a large allocation to check freed memory is being coalesced
-  SirtRef<mirror::Object> large_object(self, nullptr);
+  StackHandleScope<1> hs(soa.Self());
+  auto large_object(hs.NewHandle<mirror::Object>(nullptr));
   size_t three_quarters_space = (growth_limit / 2) + (growth_limit / 4);
   size_t bytes_allocated = 0;
   if (round <= 1) {
-    large_object.reset(Alloc(space, self, three_quarters_space, &bytes_allocated, nullptr));
+    large_object.Assign(Alloc(space, self, three_quarters_space, &bytes_allocated, nullptr));
   } else {
-    large_object.reset(AllocWithGrowth(space, self, three_quarters_space, &bytes_allocated,
-                                       nullptr));
+    large_object.Assign(AllocWithGrowth(space, self, three_quarters_space, &bytes_allocated,
+                                        nullptr));
   }
-  EXPECT_TRUE(large_object.get() != nullptr);
+  EXPECT_TRUE(large_object.Get() != nullptr);
 
   // Sanity check footprint
   footprint = space->GetFootprint();
@@ -527,7 +530,7 @@
   EXPECT_LE(space->Size(), growth_limit);
 
   // Clean up
-  space->Free(self, large_object.reset(nullptr));
+  space->Free(self, large_object.Assign(nullptr));
 
   // Sanity check footprint
   footprint = space->GetFootprint();