Merge "ART: Add entries_ lock for race condition"
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 682fa28..741657b 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -1322,9 +1322,9 @@
 /* Returns sp-relative offset in bytes for a VReg */
 int Mir2Lir::VRegOffset(int v_reg) {
   const DexFile::CodeItem* code_item = mir_graph_->GetCurrentDexCompilationUnit()->GetCodeItem();
-  return StackVisitor::GetVRegOffset(code_item, core_spill_mask_,
-                                     fp_spill_mask_, frame_size_, v_reg,
-                                     cu_->instruction_set);
+  return StackVisitor::GetVRegOffsetFromQuickCode(code_item, core_spill_mask_,
+                                                  fp_spill_mask_, frame_size_, v_reg,
+                                                  cu_->instruction_set);
 }
 
 /* Returns sp-relative offset in bytes for a SReg */
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 7d256ae..742d83e 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -705,6 +705,11 @@
         break;
       }
 
+      case Location::kInvalid: {
+        stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
+        break;
+      }
+
       default:
         LOG(FATAL) << "Unexpected kind " << location.GetKind();
     }
@@ -745,54 +750,6 @@
   }
 }
 
-void CodeGenerator::SaveLiveRegisters(LocationSummary* locations) {
-  RegisterSet* register_set = locations->GetLiveRegisters();
-  size_t stack_offset = first_register_slot_in_slow_path_;
-  for (size_t i = 0, e = GetNumberOfCoreRegisters(); i < e; ++i) {
-    if (!IsCoreCalleeSaveRegister(i)) {
-      if (register_set->ContainsCoreRegister(i)) {
-        // If the register holds an object, update the stack mask.
-        if (locations->RegisterContainsObject(i)) {
-          locations->SetStackBit(stack_offset / kVRegSize);
-        }
-        DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
-        stack_offset += SaveCoreRegister(stack_offset, i);
-      }
-    }
-  }
-
-  for (size_t i = 0, e = GetNumberOfFloatingPointRegisters(); i < e; ++i) {
-    if (!IsFloatingPointCalleeSaveRegister(i)) {
-      if (register_set->ContainsFloatingPointRegister(i)) {
-        DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
-        stack_offset += SaveFloatingPointRegister(stack_offset, i);
-      }
-    }
-  }
-}
-
-void CodeGenerator::RestoreLiveRegisters(LocationSummary* locations) {
-  RegisterSet* register_set = locations->GetLiveRegisters();
-  size_t stack_offset = first_register_slot_in_slow_path_;
-  for (size_t i = 0, e = GetNumberOfCoreRegisters(); i < e; ++i) {
-    if (!IsCoreCalleeSaveRegister(i)) {
-      if (register_set->ContainsCoreRegister(i)) {
-        DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
-        stack_offset += RestoreCoreRegister(stack_offset, i);
-      }
-    }
-  }
-
-  for (size_t i = 0, e = GetNumberOfFloatingPointRegisters(); i < e; ++i) {
-    if (!IsFloatingPointCalleeSaveRegister(i)) {
-      if (register_set->ContainsFloatingPointRegister(i)) {
-        DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
-        stack_offset += RestoreFloatingPointRegister(stack_offset, i);
-      }
-    }
-  }
-}
-
 void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check) const {
   LocationSummary* locations = suspend_check->GetLocations();
   HBasicBlock* block = suspend_check->GetBlock();
@@ -819,4 +776,56 @@
   GetMoveResolver()->EmitNativeCode(&parallel_move);
 }
 
+void SlowPathCode::RecordPcInfo(CodeGenerator* codegen, HInstruction* instruction, uint32_t dex_pc) {
+  codegen->RecordPcInfo(instruction, dex_pc);
+}
+
+void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
+  RegisterSet* register_set = locations->GetLiveRegisters();
+  size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
+  for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
+    if (!codegen->IsCoreCalleeSaveRegister(i)) {
+      if (register_set->ContainsCoreRegister(i)) {
+        // If the register holds an object, update the stack mask.
+        if (locations->RegisterContainsObject(i)) {
+          locations->SetStackBit(stack_offset / kVRegSize);
+        }
+        DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+        stack_offset += codegen->SaveCoreRegister(stack_offset, i);
+      }
+    }
+  }
+
+  for (size_t i = 0, e = codegen->GetNumberOfFloatingPointRegisters(); i < e; ++i) {
+    if (!codegen->IsFloatingPointCalleeSaveRegister(i)) {
+      if (register_set->ContainsFloatingPointRegister(i)) {
+        DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+        stack_offset += codegen->SaveFloatingPointRegister(stack_offset, i);
+      }
+    }
+  }
+}
+
+void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
+  RegisterSet* register_set = locations->GetLiveRegisters();
+  size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
+  for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
+    if (!codegen->IsCoreCalleeSaveRegister(i)) {
+      if (register_set->ContainsCoreRegister(i)) {
+        DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+        stack_offset += codegen->RestoreCoreRegister(stack_offset, i);
+      }
+    }
+  }
+
+  for (size_t i = 0, e = codegen->GetNumberOfFloatingPointRegisters(); i < e; ++i) {
+    if (!codegen->IsFloatingPointCalleeSaveRegister(i)) {
+      if (register_set->ContainsFloatingPointRegister(i)) {
+        DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+        stack_offset += codegen->RestoreFloatingPointRegister(stack_offset, i);
+      }
+    }
+  }
+}
+
 }  // namespace art
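
A minimal sketch, not part of the patch, of the slow-path pattern the backends follow after this refactoring: SaveLiveRegisters, RestoreLiveRegisters and RecordPcInfo are now SlowPathCode helpers that take the codegen explicitly. The class below is illustrative only and assumes the arch-specific SlowPathCodeX86_64 base and the local `__` assembler macro used in code_generator_x86_64.cc.

class ExampleSlowPathX86_64 : public SlowPathCodeX86_64 {
 public:
  explicit ExampleSlowPathX86_64(HInstruction* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    __ Bind(GetEntryLabel());
    // Helpers are inherited from SlowPathCode instead of being called on the codegen.
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    // ... emit the runtime call for this slow path ...
    RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
    RestoreLiveRegisters(codegen, instruction_->GetLocations());
    __ jmp(GetExitLabel());
  }

 private:
  HInstruction* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(ExampleSlowPathX86_64);
};
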
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index b8f4572..81fc684 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -73,6 +73,10 @@
 
   virtual void EmitNativeCode(CodeGenerator* codegen) = 0;
 
+  void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
+  void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
+  void RecordPcInfo(CodeGenerator* codegen, HInstruction* instruction, uint32_t dex_pc);
+
  private:
   DISALLOW_COPY_AND_ASSIGN(SlowPathCode);
 };
@@ -182,8 +186,6 @@
   void BuildNativeGCMap(
       std::vector<uint8_t>* vector, const DexCompilationUnit& dex_compilation_unit) const;
   void BuildStackMaps(std::vector<uint8_t>* vector);
-  void SaveLiveRegisters(LocationSummary* locations);
-  void RestoreLiveRegisters(LocationSummary* locations);
 
   bool IsLeafMethod() const {
     return is_leaf_;
@@ -267,6 +269,15 @@
     }
   }
 
+  size_t GetFirstRegisterSlotInSlowPath() const {
+    return first_register_slot_in_slow_path_;
+  }
+
+  uint32_t FrameEntrySpillSize() const {
+    return GetFpuSpillSize() + GetCoreSpillSize();
+  }
+
+
  protected:
   CodeGenerator(HGraph* graph,
                 size_t number_of_core_registers,
@@ -326,10 +337,6 @@
     return POPCOUNT(core_spill_mask_) * GetWordSize();
   }
 
-  uint32_t FrameEntrySpillSize() const {
-    return GetFpuSpillSize() + GetCoreSpillSize();
-  }
-
   bool HasAllocatedCalleeSaveRegisters() const {
     // We check the core registers against 1 because it always comprises the return PC.
     return (POPCOUNT(allocated_registers_.GetCoreRegisters() & core_callee_save_mask_) != 1)
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 07cc41a..aed8c06 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -114,10 +114,10 @@
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(instruction_->GetLocations());
+    SaveLiveRegisters(codegen, instruction_->GetLocations());
     arm_codegen->InvokeRuntime(
         QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc());
-    codegen->RestoreLiveRegisters(instruction_->GetLocations());
+    RestoreLiveRegisters(codegen, instruction_->GetLocations());
     if (successor_ == nullptr) {
       __ b(GetReturnLabel());
     } else {
@@ -188,7 +188,7 @@
 
     CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
     __ LoadImmediate(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
@@ -204,7 +204,7 @@
       DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
       arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
     }
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
     __ b(GetExitLabel());
   }
 
@@ -235,7 +235,7 @@
 
     CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
     arm_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
@@ -244,7 +244,7 @@
         QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc());
     arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
 
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
     __ b(GetExitLabel());
   }
 
@@ -272,7 +272,7 @@
 
     CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
@@ -291,7 +291,7 @@
       arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_);
     }
 
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
     __ b(GetExitLabel());
   }
 
@@ -1205,6 +1205,7 @@
   Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
 
   codegen_->GenerateStaticOrDirectCall(invoke, temp);
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
 }
 
 void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
@@ -3861,7 +3862,6 @@
     __ bl(GetFrameEntryLabel());
   }
 
-  RecordPcInfo(invoke, invoke->GetDexPc());
   DCHECK(!IsLeafMethod());
 }
 
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index c21084a..93c4ce5 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -191,7 +191,7 @@
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
 
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
     __ Mov(calling_convention.GetRegisterAt(0).W(), cls_->GetTypeIndex());
@@ -213,7 +213,7 @@
       arm64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
     }
 
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
     __ B(GetExitLabel());
   }
 
@@ -244,7 +244,7 @@
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
 
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
     arm64_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1).W());
@@ -255,7 +255,7 @@
     Primitive::Type type = instruction_->GetType();
     arm64_codegen->MoveLocation(locations->Out(), calling_convention.GetReturnLocation(type), type);
 
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
     __ B(GetExitLabel());
   }
 
@@ -292,11 +292,11 @@
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(instruction_->GetLocations());
+    SaveLiveRegisters(codegen, instruction_->GetLocations());
     arm64_codegen->InvokeRuntime(
         QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc());
     CheckEntrypointTypes<kQuickTestSuspend, void, void>();
-    codegen->RestoreLiveRegisters(instruction_->GetLocations());
+    RestoreLiveRegisters(codegen, instruction_->GetLocations());
     if (successor_ == nullptr) {
       __ B(GetReturnLabel());
     } else {
@@ -338,7 +338,7 @@
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
 
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
@@ -360,7 +360,7 @@
       CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
     }
 
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
     __ B(GetExitLabel());
   }
 
@@ -1920,7 +1920,6 @@
     __ Bl(&frame_entry_label_);
   }
 
-  RecordPcInfo(invoke, invoke->GetDexPc());
   DCHECK(!IsLeafMethod());
 }
 
@@ -1931,6 +1930,7 @@
 
   Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
   codegen_->GenerateStaticOrDirectCall(invoke, temp);
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
 }
 
 void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index a09ecb8..1db1600 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -158,16 +158,16 @@
 
 class SuspendCheckSlowPathX86 : public SlowPathCodeX86 {
  public:
-  explicit SuspendCheckSlowPathX86(HSuspendCheck* instruction, HBasicBlock* successor)
+  SuspendCheckSlowPathX86(HSuspendCheck* instruction, HBasicBlock* successor)
       : instruction_(instruction), successor_(successor) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(instruction_->GetLocations());
+    SaveLiveRegisters(codegen, instruction_->GetLocations());
     __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pTestSuspend)));
     codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
-    codegen->RestoreLiveRegisters(instruction_->GetLocations());
+    RestoreLiveRegisters(codegen, instruction_->GetLocations());
     if (successor_ == nullptr) {
       __ jmp(GetReturnLabel());
     } else {
@@ -198,15 +198,15 @@
 
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
     x86_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
     __ movl(calling_convention.GetRegisterAt(0), Immediate(instruction_->GetStringIndex()));
     __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pResolveString)));
-    codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+    RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
     x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
 
     __ jmp(GetExitLabel());
   }
@@ -231,7 +231,7 @@
     LocationSummary* locations = at_->GetLocations();
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
     __ movl(calling_convention.GetRegisterAt(0), Immediate(cls_->GetTypeIndex()));
@@ -239,7 +239,7 @@
     __ fs()->call(Address::Absolute(do_clinit_
         ? QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInitializeStaticStorage)
         : QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInitializeType)));
-    codegen->RecordPcInfo(at_, dex_pc_);
+    RecordPcInfo(codegen, at_, dex_pc_);
 
     // Move the class to the desired location.
     Location out = locations->Out();
@@ -248,7 +248,7 @@
       x86_codegen->Move32(out, Location::RegisterLocation(EAX));
     }
 
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
     __ jmp(GetExitLabel());
   }
 
@@ -287,7 +287,7 @@
 
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
@@ -306,11 +306,11 @@
       __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pCheckCast)));
     }
 
-    codegen->RecordPcInfo(instruction_, dex_pc_);
+    RecordPcInfo(codegen, instruction_, dex_pc_);
     if (instruction_->IsInstanceOf()) {
       x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
     }
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
 
     __ jmp(GetExitLabel());
   }
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 07ba95d..90d87d4 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -72,7 +72,7 @@
     __ Bind(GetEntryLabel());
     __ gs()->call(
         Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowNullPointer), true));
-    codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+    RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
   }
 
  private:
@@ -88,7 +88,7 @@
     __ Bind(GetEntryLabel());
     __ gs()->call(
         Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowDivZero), true));
-    codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+    RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
   }
 
  private:
@@ -136,10 +136,10 @@
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(instruction_->GetLocations());
+    SaveLiveRegisters(codegen, instruction_->GetLocations());
     __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pTestSuspend), true));
-    codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
-    codegen->RestoreLiveRegisters(instruction_->GetLocations());
+    RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
+    RestoreLiveRegisters(codegen, instruction_->GetLocations());
     if (successor_ == nullptr) {
       __ jmp(GetReturnLabel());
     } else {
@@ -181,7 +181,7 @@
         Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
     __ gs()->call(Address::Absolute(
         QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowArrayBounds), true));
-    codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+    RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
   }
 
  private:
@@ -207,7 +207,7 @@
     CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     __ Bind(GetEntryLabel());
 
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
     __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(cls_->GetTypeIndex()));
@@ -215,7 +215,7 @@
     __ gs()->call(Address::Absolute((do_clinit_
           ? QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeStaticStorage)
           : QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeType)) , true));
-    codegen->RecordPcInfo(at_, dex_pc_);
+    RecordPcInfo(codegen, at_, dex_pc_);
 
     Location out = locations->Out();
     // Move the class to the desired location.
@@ -224,7 +224,7 @@
       x64_codegen->Move(out, Location::RegisterLocation(RAX));
     }
 
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
     __ jmp(GetExitLabel());
   }
 
@@ -255,7 +255,7 @@
 
     CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
     x64_codegen->LoadCurrentMethod(CpuRegister(calling_convention.GetRegisterAt(1)));
@@ -263,9 +263,9 @@
             Immediate(instruction_->GetStringIndex()));
     __ gs()->call(Address::Absolute(
         QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pResolveString), true));
-    codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+    RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
     x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
     __ jmp(GetExitLabel());
   }
 
@@ -293,7 +293,7 @@
 
     CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(locations);
+    SaveLiveRegisters(codegen, locations);
 
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
@@ -312,13 +312,13 @@
       __ gs()->call(
           Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pCheckCast), true));
     }
-    codegen->RecordPcInfo(instruction_, dex_pc_);
+    RecordPcInfo(codegen, instruction_, dex_pc_);
 
     if (instruction_->IsInstanceOf()) {
       x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
     }
 
-    codegen->RestoreLiveRegisters(locations);
+    RestoreLiveRegisters(codegen, locations);
     __ jmp(GetExitLabel());
   }
 
@@ -374,7 +374,6 @@
   }
 
   DCHECK(!IsLeafMethod());
-  RecordPcInfo(invoke, invoke->GetDexPc());
 }
 
 void CodeGeneratorX86_64::DumpCoreRegister(std::ostream& stream, int reg) const {
@@ -1216,6 +1215,7 @@
   codegen_->GenerateStaticOrDirectCall(
       invoke,
       invoke->GetLocations()->GetTemp(0).AsRegister<CpuRegister>());
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
 }
 
 void LocationsBuilderX86_64::HandleInvoke(HInvoke* invoke) {
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index e22f7cc..bd9267c 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -38,6 +38,11 @@
 static constexpr int kDepthLimit = 5;
 
 void HInliner::Run() {
+  if (graph_->IsDebuggable()) {
+    // For simplicity, we currently never inline when the graph is debuggable. This avoids
+    // needing runtime logic to discover whether a method could have been inlined.
+    return;
+  }
   const GrowableArray<HBasicBlock*>& blocks = graph_->GetReversePostOrder();
   for (size_t i = 0; i < blocks.Size(); ++i) {
     HBasicBlock* block = blocks.Get(i);
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index a82d80a..0c9eb94 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -114,12 +114,13 @@
     CodeGeneratorARM* codegen = down_cast<CodeGeneratorARM*>(codegen_in);
     __ Bind(GetEntryLabel());
 
-    codegen->SaveLiveRegisters(invoke_->GetLocations());
+    SaveLiveRegisters(codegen, invoke_->GetLocations());
 
     MoveArguments(invoke_, codegen->GetGraph()->GetArena(), codegen);
 
     if (invoke_->IsInvokeStaticOrDirect()) {
       codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), kArtMethodRegister);
+      RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
     } else {
       UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
       UNREACHABLE();
@@ -133,7 +134,7 @@
       MoveFromReturnRegister(out, invoke_->GetType(), codegen);
     }
 
-    codegen->RestoreLiveRegisters(invoke_->GetLocations());
+    RestoreLiveRegisters(codegen, invoke_->GetLocations());
     __ b(GetExitLabel());
   }
 
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 1ddff8a..19b04ae 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -122,12 +122,13 @@
     CodeGeneratorARM64* codegen = down_cast<CodeGeneratorARM64*>(codegen_in);
     __ Bind(GetEntryLabel());
 
-    codegen->SaveLiveRegisters(invoke_->GetLocations());
+    SaveLiveRegisters(codegen, invoke_->GetLocations());
 
     MoveArguments(invoke_, codegen->GetGraph()->GetArena(), codegen);
 
     if (invoke_->IsInvokeStaticOrDirect()) {
       codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), kArtMethodRegister);
+      RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
     } else {
       UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
       UNREACHABLE();
@@ -141,7 +142,7 @@
       MoveFromReturnRegister(out, invoke_->GetType(), codegen);
     }
 
-    codegen->RestoreLiveRegisters(invoke_->GetLocations());
+    RestoreLiveRegisters(codegen, invoke_->GetLocations());
     __ B(GetExitLabel());
   }
 
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index c73f092..2064b18 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -134,12 +134,13 @@
     CodeGeneratorX86_64* codegen = down_cast<CodeGeneratorX86_64*>(codegen_in);
     __ Bind(GetEntryLabel());
 
-    codegen->SaveLiveRegisters(invoke_->GetLocations());
+    SaveLiveRegisters(codegen, invoke_->GetLocations());
 
     MoveArguments(invoke_, codegen->GetGraph()->GetArena(), codegen);
 
     if (invoke_->IsInvokeStaticOrDirect()) {
       codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), CpuRegister(RDI));
+      RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
     } else {
       UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
       UNREACHABLE();
@@ -153,7 +154,7 @@
       MoveFromReturnRegister(out, invoke_->GetType(), codegen);
     }
 
-    codegen->RestoreLiveRegisters(invoke_->GetLocations());
+    RestoreLiveRegisters(codegen, invoke_->GetLocations());
     __ jmp(GetExitLabel());
   }
 
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index ec3d743..a35fa1d 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1109,6 +1109,7 @@
 
   bool HasUses() const { return !uses_.IsEmpty() || !env_uses_.IsEmpty(); }
   bool HasEnvironmentUses() const { return !env_uses_.IsEmpty(); }
+  bool HasNonEnvironmentUses() const { return !uses_.IsEmpty(); }
 
   // Does this instruction strictly dominate `other_instruction`?
   // Returns false if this instruction and `other_instruction` are the same.
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index b414fb2..24dc449 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -33,7 +33,9 @@
  *
  * (a) Dex registers that do not require merging (that is, they do not
  *     have different values at a join block) are available to all their
- *     environment uses.
+ *     environment uses. Note that this does not imply that the instruction
+ *     will have a physical location after register allocation. See the
+ *     SsaLivenessAnalysis phase.
  *
  * (b) Dex registers that require merging, and the merging gives
  *     incompatible types, will be killed for environment uses of that merge.
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index d009390..c0d6f42 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -230,11 +230,12 @@
       }
 
       if (current->HasEnvironment()) {
-        // All instructions in the environment must be live.
+        // Handle environment uses. See rules (b) and (c) in the
+        // SsaLivenessAnalysis class comment.
         HEnvironment* environment = current->GetEnvironment();
         for (size_t i = 0, e = environment->Size(); i < e; ++i) {
           HInstruction* instruction = environment->GetInstructionAt(i);
-          if (instruction != nullptr) {
+          if (ShouldBeLiveForEnvironment(instruction)) {
             DCHECK(instruction->HasSsaIndex());
             live_in->SetBit(instruction->GetSsaIndex());
             instruction->GetLiveInterval()->AddUse(current, i, true);
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 5787f0c..b57029d 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -302,7 +302,7 @@
       first_range_->start_ = from;
     } else {
       // Instruction without uses.
-      DCHECK(!defined_by_->HasUses());
+      DCHECK(!defined_by_->HasNonEnvironmentUses());
       DCHECK(from == defined_by_->GetLifetimePosition());
       first_range_ = last_range_ = new (allocator_) LiveRange(from, from + 2, nullptr);
     }
@@ -798,6 +798,22 @@
   DISALLOW_COPY_AND_ASSIGN(LiveInterval);
 };
 
+/**
+ * Analysis that computes the liveness of instructions:
+ *
+ * (a) Non-environment uses of an instruction always make
+ *     the instruction live.
+ * (b) Environment uses of an instruction whose type is
+ *     object (that is, non-primitive) make the instruction live,
+ *     because objects whose finalizers delete native objects
+ *     must be kept alive.
+ * (c) When the graph has the debuggable property, environment uses
+ *     of an instruction that has a primitive type make the instruction live.
+ *     If the graph does not have the debuggable property, the environment
+ *     use has no effect, and may get a 'none' value after register allocation.
+ *
+ * (b) and (c) are implemented through SsaLivenessAnalysis::ShouldBeLiveForEnvironment.
+ */
 class SsaLivenessAnalysis : public ValueObject {
  public:
   SsaLivenessAnalysis(const HGraph& graph, CodeGenerator* codegen)
@@ -882,6 +898,12 @@
   // Update the live_out set of the block and returns whether it has changed.
   bool UpdateLiveOut(const HBasicBlock& block);
 
+  static bool ShouldBeLiveForEnvironment(HInstruction* instruction) {
+    if (instruction == nullptr) return false;
+    if (instruction->GetBlock()->GetGraph()->IsDebuggable()) return true;
+    return instruction->GetType() == Primitive::kPrimNot;
+  }
+
   const HGraph& graph_;
   CodeGenerator* const codegen_;
   GrowableArray<HBasicBlock*> linear_order_;
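
The three rules in the new SsaLivenessAnalysis comment can be read as a single predicate. A sketch under that reading, not part of the patch (the function name is hypothetical; rule (a) is handled by the regular use-marking loop in ssa_liveness_analysis.cc, rules (b) and (c) by ShouldBeLiveForEnvironment above):

static bool UseKeepsInstructionLive(HInstruction* instruction, bool is_environment_use) {
  if (!is_environment_use) {
    return true;                                          // (a) Real uses always keep the value live.
  }
  if (instruction->GetBlock()->GetGraph()->IsDebuggable()) {
    return true;                                          // (c) Debuggable graphs keep environment values.
  }
  return instruction->GetType() == Primitive::kPrimNot;   // (b) Objects must stay live for finalizers.
}
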
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 76ddbf3..863bab2 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -99,14 +99,20 @@
   }
 
   size_t ComputeNeededSize() const {
-    return CodeInfo::kFixedSize
-        + ComputeStackMapSize()
+    size_t size = CodeInfo::kFixedSize
+        + ComputeStackMapsSize()
         + ComputeDexRegisterMapsSize()
         + ComputeInlineInfoSize();
+    // On ARM, CodeInfo data must be 4-byte aligned.
+    return RoundUp(size, kWordAlignment);
   }
 
-  size_t ComputeStackMapSize() const {
-    return stack_maps_.Size() * StackMap::ComputeAlignedStackMapSize(stack_mask_max_);
+  size_t ComputeStackMaskSize() const {
+    return StackMaskEncodingSize(stack_mask_max_);
+  }
+
+  size_t ComputeStackMapsSize() const {
+    return stack_maps_.Size() * StackMap::ComputeStackMapSize(ComputeStackMaskSize());
   }
 
   // Compute the size of the Dex register map of `entry`.
@@ -129,8 +135,7 @@
       DexRegisterLocation entry = dex_register_maps_.Get(i);
       size += DexRegisterMap::EntrySize(entry);
     }
-    // On ARM, the Dex register maps must be 4-byte aligned.
-    return RoundUp(size, kWordAlignment);
+    return size;
   }
 
   // Compute the size of all the inline information pieces.
@@ -141,7 +146,7 @@
   }
 
   size_t ComputeDexRegisterMapsStart() const {
-    return CodeInfo::kFixedSize + ComputeStackMapSize();
+    return CodeInfo::kFixedSize + ComputeStackMapsSize();
   }
 
   size_t ComputeInlineInfoStart() const {
@@ -150,9 +155,10 @@
 
   void FillIn(MemoryRegion region) {
     CodeInfo code_info(region);
+    DCHECK_EQ(region.size(), ComputeNeededSize());
     code_info.SetOverallSize(region.size());
 
-    size_t stack_mask_size = StackMaskEncodingSize(stack_mask_max_);
+    size_t stack_mask_size = ComputeStackMaskSize();
     uint8_t* memory_start = region.start();
 
     MemoryRegion dex_register_maps_region = region.Subregion(
@@ -165,6 +171,7 @@
 
     code_info.SetNumberOfStackMaps(stack_maps_.Size());
     code_info.SetStackMaskSize(stack_mask_size);
+    DCHECK_EQ(code_info.StackMapsSize(), ComputeStackMapsSize());
 
     uintptr_t next_dex_register_map_offset = 0;
     uintptr_t next_inline_info_offset = 0;
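
Since individual stack maps are no longer padded, only the total CodeInfo size is rounded up once at the end; this is also why the StackMap and InlineInfo accessors in stack_map.h below switch to LoadUnaligned/StoreUnaligned. A sketch of the size arithmetic, not part of the patch (the function name and parameters are illustrative):

static size_t ExampleCodeInfoSize(size_t num_stack_maps,
                                  size_t stack_mask_size,
                                  size_t dex_register_maps_size,
                                  size_t inline_info_size) {
  // Stack maps are packed back to back: each one is exactly kFixedSize + stack_mask_size bytes.
  size_t stack_maps_size = num_stack_maps * StackMap::ComputeStackMapSize(stack_mask_size);
  size_t size = CodeInfo::kFixedSize + stack_maps_size + dex_register_maps_size + inline_info_size;
  // A single round-up keeps the whole CodeInfo region 4-byte aligned on ARM.
  return RoundUp(size, kWordAlignment);
}
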
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 9ae3b79..9512376 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1138,10 +1138,13 @@
           os << "\n\tlocals:";
         }
 
-        uint32_t offset = StackVisitor::GetVRegOffset(code_item, oat_method.GetCoreSpillMask(),
-                                                      oat_method.GetFpSpillMask(),
-                                                      oat_method.GetFrameSizeInBytes(), reg,
-                                                      GetInstructionSet());
+        uint32_t offset = StackVisitor::GetVRegOffsetFromQuickCode(
+            code_item,
+            oat_method.GetCoreSpillMask(),
+            oat_method.GetFpSpillMask(),
+            oat_method.GetFrameSizeInBytes(),
+            reg,
+            GetInstructionSet());
         os << " v" << reg << "[sp + #" << offset << "]";
       }
 
@@ -1170,10 +1173,13 @@
                                        : oat_method.GetCoreSpillMask();
         os << (is_float ? "fr" : "r") << vmap_table.ComputeRegister(spill_mask, vmap_offset, kind);
       } else {
-        uint32_t offset = StackVisitor::GetVRegOffset(code_item, oat_method.GetCoreSpillMask(),
-                                                      oat_method.GetFpSpillMask(),
-                                                      oat_method.GetFrameSizeInBytes(), reg,
-                                                      GetInstructionSet());
+        uint32_t offset = StackVisitor::GetVRegOffsetFromQuickCode(
+            code_item,
+            oat_method.GetCoreSpillMask(),
+            oat_method.GetFpSpillMask(),
+            oat_method.GetFrameSizeInBytes(),
+            reg,
+            GetInstructionSet());
         os << "[sp + #" << offset << "]";
       }
     }
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 356a438..fbbc863 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -263,37 +263,118 @@
   result->SetL(Runtime::Current()->GetClassLinker()->FindPrimitiveClass('V'));
 }
 
+// Arraycopy emulation.
+// Note: we can't use any fast copy functions, as they are not available under transaction.
+
+template <typename T>
+static void PrimitiveArrayCopy(Thread* self,
+                               mirror::Array* src_array, int32_t src_pos,
+                               mirror::Array* dst_array, int32_t dst_pos,
+                               int32_t length)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  if (src_array->GetClass()->GetComponentType() != dst_array->GetClass()->GetComponentType()) {
+    AbortTransactionOrFail(self, "Types mismatched in arraycopy: %s vs %s.",
+                           PrettyDescriptor(src_array->GetClass()->GetComponentType()).c_str(),
+                           PrettyDescriptor(dst_array->GetClass()->GetComponentType()).c_str());
+    return;
+  }
+  mirror::PrimitiveArray<T>* src = down_cast<mirror::PrimitiveArray<T>*>(src_array);
+  mirror::PrimitiveArray<T>* dst = down_cast<mirror::PrimitiveArray<T>*>(dst_array);
+  const bool copy_forward = (dst_pos < src_pos) || (dst_pos - src_pos >= length);
+  if (copy_forward) {
+    for (int32_t i = 0; i < length; ++i) {
+      dst->Set(dst_pos + i, src->Get(src_pos + i));
+    }
+  } else {
+    for (int32_t i = 1; i <= length; ++i) {
+      dst->Set(dst_pos + length - i, src->Get(src_pos + length - i));
+    }
+  }
+}
+
 static void UnstartedSystemArraycopy(
     Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   // Special case array copying without initializing System.
-  mirror::Class* ctype = shadow_frame->GetVRegReference(arg_offset)->GetClass()->GetComponentType();
-  jint srcPos = shadow_frame->GetVReg(arg_offset + 1);
-  jint dstPos = shadow_frame->GetVReg(arg_offset + 3);
+  jint src_pos = shadow_frame->GetVReg(arg_offset + 1);
+  jint dst_pos = shadow_frame->GetVReg(arg_offset + 3);
   jint length = shadow_frame->GetVReg(arg_offset + 4);
-  if (!ctype->IsPrimitive()) {
-    mirror::ObjectArray<mirror::Object>* src = shadow_frame->GetVRegReference(arg_offset)->
-        AsObjectArray<mirror::Object>();
-    mirror::ObjectArray<mirror::Object>* dst = shadow_frame->GetVRegReference(arg_offset + 2)->
-        AsObjectArray<mirror::Object>();
-    for (jint i = 0; i < length; ++i) {
-      dst->Set(dstPos + i, src->Get(srcPos + i));
+  mirror::Array* src_array = shadow_frame->GetVRegReference(arg_offset)->AsArray();
+  mirror::Array* dst_array = shadow_frame->GetVRegReference(arg_offset + 2)->AsArray();
+
+  // Null checking.
+  if (src_array == nullptr) {
+    AbortTransactionOrFail(self, "src is null in arraycopy.");
+    return;
+  }
+  if (dst_array == nullptr) {
+    AbortTransactionOrFail(self, "dst is null in arraycopy.");
+    return;
+  }
+
+  // Bounds checking.
+  if (UNLIKELY(src_pos < 0) || UNLIKELY(dst_pos < 0) || UNLIKELY(length < 0) ||
+      UNLIKELY(src_pos > src_array->GetLength() - length) ||
+      UNLIKELY(dst_pos > dst_array->GetLength() - length)) {
+    self->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;",
+                             "src.length=%d srcPos=%d dst.length=%d dstPos=%d length=%d",
+                             src_array->GetLength(), src_pos, dst_array->GetLength(), dst_pos,
+                             length);
+    AbortTransactionOrFail(self, "Index out of bounds.");
+    return;
+  }
+
+  // Type checking.
+  mirror::Class* src_type = shadow_frame->GetVRegReference(arg_offset)->GetClass()->
+      GetComponentType();
+
+  if (!src_type->IsPrimitive()) {
+    // A primitive destination is a type mismatch; int is checked here, other cases abort below.
+    mirror::Class* trg_type = shadow_frame->GetVRegReference(arg_offset + 2)->GetClass()->
+        GetComponentType();
+    if (trg_type->IsPrimitiveInt()) {
+      AbortTransactionOrFail(self, "Type mismatch in arraycopy: %s vs %s",
+                             PrettyDescriptor(src_array->GetClass()->GetComponentType()).c_str(),
+                             PrettyDescriptor(dst_array->GetClass()->GetComponentType()).c_str());
+      return;
     }
-  } else if (ctype->IsPrimitiveChar()) {
-    mirror::CharArray* src = shadow_frame->GetVRegReference(arg_offset)->AsCharArray();
-    mirror::CharArray* dst = shadow_frame->GetVRegReference(arg_offset + 2)->AsCharArray();
-    for (jint i = 0; i < length; ++i) {
-      dst->Set(dstPos + i, src->Get(srcPos + i));
+
+    // For simplicity, only handle the case where both component types are the same. Otherwise we
+    // would have to duplicate even more code from the object-array copy functions.
+    if (src_type != trg_type) {
+      AbortTransactionOrFail(self, "Types not the same in arraycopy: %s vs %s",
+                             PrettyDescriptor(src_array->GetClass()->GetComponentType()).c_str(),
+                             PrettyDescriptor(dst_array->GetClass()->GetComponentType()).c_str());
+      return;
     }
-  } else if (ctype->IsPrimitiveInt()) {
-    mirror::IntArray* src = shadow_frame->GetVRegReference(arg_offset)->AsIntArray();
-    mirror::IntArray* dst = shadow_frame->GetVRegReference(arg_offset + 2)->AsIntArray();
-    for (jint i = 0; i < length; ++i) {
-      dst->Set(dstPos + i, src->Get(srcPos + i));
+
+    mirror::ObjectArray<mirror::Object>* src = src_array->AsObjectArray<mirror::Object>();
+    mirror::ObjectArray<mirror::Object>* dst = dst_array->AsObjectArray<mirror::Object>();
+    if (src == dst) {
+      // The ranges may overlap, but there can be no type mismatch since src == dst.
+      const bool copy_forward = (dst_pos < src_pos) || (dst_pos - src_pos >= length);
+      if (copy_forward) {
+        for (int32_t i = 0; i < length; ++i) {
+          dst->Set(dst_pos + i, src->Get(src_pos + i));
+        }
+      } else {
+        for (int32_t i = 1; i <= length; ++i) {
+          dst->Set(dst_pos + length - i, src->Get(src_pos + length - i));
+        }
+      }
+    } else {
+      // Distinct arrays cannot overlap; no per-element type checks needed, mismatches abort above.
+      for (int32_t i = 0; i < length; ++i) {
+        dst->Set(dst_pos + i, src->Get(src_pos + i));
+      }
     }
+  } else if (src_type->IsPrimitiveChar()) {
+    PrimitiveArrayCopy<uint16_t>(self, src_array, src_pos, dst_array, dst_pos, length);
+  } else if (src_type->IsPrimitiveInt()) {
+    PrimitiveArrayCopy<int32_t>(self, src_array, src_pos, dst_array, dst_pos, length);
   } else {
     AbortTransactionOrFail(self, "Unimplemented System.arraycopy for type '%s'",
-                           PrettyDescriptor(ctype).c_str());
+                           PrettyDescriptor(src_type).c_str());
   }
 }
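
The overlap handling in UnstartedSystemArraycopy and PrimitiveArrayCopy above follows the usual memmove rule: copy forward when the destination starts before the source or the ranges are disjoint, otherwise copy backwards so elements are not overwritten before they are read. A standalone sketch of that rule, not part of the patch, using a plain int array for illustration:

static void CopyWithinArray(int32_t* data, int32_t src_pos, int32_t dst_pos, int32_t length) {
  const bool copy_forward = (dst_pos < src_pos) || (dst_pos - src_pos >= length);
  if (copy_forward) {
    for (int32_t i = 0; i < length; ++i) {
      data[dst_pos + i] = data[src_pos + i];
    }
  } else {
    // Overlapping with dst after src: copy from the end so sources are read before being clobbered.
    for (int32_t i = 1; i <= length; ++i) {
      data[dst_pos + length - i] = data[src_pos + length - i];
    }
  }
}
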
 
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index d41d37e..1a80ded 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -1043,8 +1043,11 @@
     }
 
     uint16_t monitor_register = ((monitor_enter_instruction >> 8) & 0xff);
-    mirror::Object* o = reinterpret_cast<mirror::Object*>(
-        stack_visitor->GetVReg(m, monitor_register, kReferenceVReg));
+    uint32_t value;
+    bool success = stack_visitor->GetVReg(m, monitor_register, kReferenceVReg, &value);
+    CHECK(success) << "Failed to read v" << monitor_register << " of kind "
+                   << kReferenceVReg << " in method " << PrettyMethod(m);
+    mirror::Object* o = reinterpret_cast<mirror::Object*>(value);
     callback(o, callback_context);
   }
 }
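
With the CHECK-ing GetVReg/GetVRegPair convenience overloads removed from StackVisitor (see stack.h further down), each caller now decides what a failed read means: monitor.cc and stack.cc CHECK because those registers are guaranteed live, while quick_exception_handler.cc falls back to a dead-value marker. A sketch of the fallback pattern, not part of the patch (the helper name is hypothetical):

static uint32_t ReadVRegOrDeadValue(StackVisitor* visitor, mirror::ArtMethod* m,
                                    uint16_t vreg, VRegKind kind)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  static constexpr uint32_t kDeadValue = 0xEBADDE09;  // Same marker as quick_exception_handler.cc.
  uint32_t value = 0;
  if (visitor->GetVReg(m, vreg, kind, &value)) {
    return value;  // The register has a location and was read successfully.
  }
  // The register was optimized away (see the SsaLivenessAnalysis comment about 'none' locations).
  return kDeadValue;
}
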
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 0eb8eca..2432603 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -205,52 +205,97 @@
     ShadowFrame* new_frame = ShadowFrame::Create(num_regs, nullptr, h_method.Get(), dex_pc);
     self_->SetShadowFrameUnderConstruction(new_frame);
     const std::vector<int32_t> kinds(verifier.DescribeVRegs(dex_pc));
+
+    // Markers for dead values, used when the verifier knows a Dex register is undefined,
+    // or when the compiler knows the register has not been initialized or is no longer
+    // used in the method.
+    static constexpr uint32_t kDeadValue = 0xEBADDE09;
+    static constexpr uint64_t kLongDeadValue = 0xEBADDE09EBADDE09;
     for (uint16_t reg = 0; reg < num_regs; ++reg) {
       VRegKind kind = GetVRegKind(reg, kinds);
       switch (kind) {
         case kUndefined:
-          new_frame->SetVReg(reg, 0xEBADDE09);
+          new_frame->SetVReg(reg, kDeadValue);
           break;
         case kConstant:
           new_frame->SetVReg(reg, kinds.at((reg * 2) + 1));
           break;
-        case kReferenceVReg:
-          new_frame->SetVRegReference(reg,
-                                      reinterpret_cast<mirror::Object*>(GetVReg(h_method.Get(),
-                                                                                reg, kind)));
+        case kReferenceVReg: {
+          uint32_t value = 0;
+          if (GetVReg(h_method.Get(), reg, kind, &value)) {
+            new_frame->SetVRegReference(reg, reinterpret_cast<mirror::Object*>(value));
+          } else {
+            new_frame->SetVReg(reg, kDeadValue);
+          }
           break;
+        }
         case kLongLoVReg:
           if (GetVRegKind(reg + 1, kinds) == kLongHiVReg) {
             // Treat it as a "long" register pair.
-            new_frame->SetVRegLong(reg, GetVRegPair(h_method.Get(), reg, kLongLoVReg, kLongHiVReg));
+            uint64_t value = 0;
+            if (GetVRegPair(h_method.Get(), reg, kLongLoVReg, kLongHiVReg, &value)) {
+              new_frame->SetVRegLong(reg, value);
+            } else {
+              new_frame->SetVRegLong(reg, kLongDeadValue);
+            }
           } else {
-            new_frame->SetVReg(reg, GetVReg(h_method.Get(), reg, kind));
+            uint32_t value = 0;
+            if (GetVReg(h_method.Get(), reg, kind, &value)) {
+              new_frame->SetVReg(reg, value);
+            } else {
+              new_frame->SetVReg(reg, kDeadValue);
+            }
           }
           break;
         case kLongHiVReg:
           if (GetVRegKind(reg - 1, kinds) == kLongLoVReg) {
             // Nothing to do: we treated it as a "long" register pair.
           } else {
-            new_frame->SetVReg(reg, GetVReg(h_method.Get(), reg, kind));
+            uint32_t value = 0;
+            if (GetVReg(h_method.Get(), reg, kind, &value)) {
+              new_frame->SetVReg(reg, value);
+            } else {
+              new_frame->SetVReg(reg, kDeadValue);
+            }
           }
           break;
         case kDoubleLoVReg:
           if (GetVRegKind(reg + 1, kinds) == kDoubleHiVReg) {
-            // Treat it as a "double" register pair.
-            new_frame->SetVRegLong(reg, GetVRegPair(h_method.Get(), reg, kDoubleLoVReg, kDoubleHiVReg));
+            uint64_t value = 0;
+            if (GetVRegPair(h_method.Get(), reg, kDoubleLoVReg, kDoubleHiVReg, &value)) {
+              // Treat it as a "double" register pair.
+              new_frame->SetVRegLong(reg, value);
+            } else {
+              new_frame->SetVRegLong(reg, kLongDeadValue);
+            }
           } else {
-            new_frame->SetVReg(reg, GetVReg(h_method.Get(), reg, kind));
+            uint32_t value = 0;
+            if (GetVReg(h_method.Get(), reg, kind, &value)) {
+              new_frame->SetVReg(reg, value);
+            } else {
+              new_frame->SetVReg(reg, kDeadValue);
+            }
           }
           break;
         case kDoubleHiVReg:
           if (GetVRegKind(reg - 1, kinds) == kDoubleLoVReg) {
             // Nothing to do: we treated it as a "double" register pair.
           } else {
-            new_frame->SetVReg(reg, GetVReg(h_method.Get(), reg, kind));
+            uint32_t value = 0;
+            if (GetVReg(h_method.Get(), reg, kind, &value)) {
+              new_frame->SetVReg(reg, value);
+            } else {
+              new_frame->SetVReg(reg, kDeadValue);
+            }
           }
           break;
         default:
-          new_frame->SetVReg(reg, GetVReg(h_method.Get(), reg, kind));
+          uint32_t value = 0;
+          if (GetVReg(h_method.Get(), reg, kind, &value)) {
+            new_frame->SetVReg(reg, value);
+          } else {
+            new_frame->SetVReg(reg, kDeadValue);
+          }
           break;
       }
     }
diff --git a/runtime/stack.cc b/runtime/stack.cc
index e420c57..47b85ad 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -129,12 +129,6 @@
     } else {
       return cur_shadow_frame_->GetVRegReference(0);
     }
-  } else if (m->IsOptimized(GetInstructionSetPointerSize(
-      Runtime::Current()->GetInstructionSet()))) {
-    // TODO: Implement, currently only used for exceptions when jdwp is enabled.
-    UNIMPLEMENTED(WARNING)
-        << "StackVisitor::GetThisObject is unimplemented with the optimizing compiler";
-    return nullptr;
   } else {
     const DexFile::CodeItem* code_item = m->GetCodeItem();
     if (code_item == nullptr) {
@@ -143,7 +137,11 @@
       return nullptr;
     } else {
       uint16_t reg = code_item->registers_size_ - code_item->ins_size_;
-      return reinterpret_cast<mirror::Object*>(GetVReg(m, reg, kReferenceVReg));
+      uint32_t value = 0;
+      bool success = GetVReg(m, reg, kReferenceVReg, &value);
+      // We currently always guarantee the `this` object is live throughout the method.
+      CHECK(success) << "Failed to read the this object in " << PrettyMethod(m);
+      return reinterpret_cast<mirror::Object*>(value);
     }
   }
 }
@@ -187,8 +185,8 @@
     const DexFile::CodeItem* code_item = m->GetCodeItem();
     DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be NULL or how would we compile
                                                       // its instructions?
-    *val = *GetVRegAddr(cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
-                        frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
+    *val = *GetVRegAddrFromQuickCode(cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
+                                     frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
     return true;
   }
 }
@@ -297,8 +295,9 @@
     const DexFile::CodeItem* code_item = m->GetCodeItem();
     DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be NULL or how would we compile
                                                       // its instructions?
-    uint32_t* addr = GetVRegAddr(cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
-                                 frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
+    uint32_t* addr = GetVRegAddrFromQuickCode(
+        cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
+        frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
     *val = *reinterpret_cast<uint64_t*>(addr);
     return true;
   }
@@ -371,8 +370,9 @@
     const DexFile::CodeItem* code_item = m->GetCodeItem();
     DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be NULL or how would we compile
                                                       // its instructions?
-    uint32_t* addr = GetVRegAddr(cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
-                                 frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
+    uint32_t* addr = GetVRegAddrFromQuickCode(
+        cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
+        frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
     *addr = new_value;
     return true;
   }
@@ -500,8 +500,9 @@
     const DexFile::CodeItem* code_item = m->GetCodeItem();
     DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be NULL or how would we compile
                                                       // its instructions?
-    uint32_t* addr = GetVRegAddr(cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
-                                 frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
+    uint32_t* addr = GetVRegAddrFromQuickCode(
+        cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
+        frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
     *reinterpret_cast<uint64_t*>(addr) = new_value;
     return true;
   }
diff --git a/runtime/stack.h b/runtime/stack.h
index 13bd47f..aab54ba 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -486,29 +486,10 @@
   bool GetVReg(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  uint32_t GetVReg(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    uint32_t val;
-    bool success = GetVReg(m, vreg, kind, &val);
-    CHECK(success) << "Failed to read v" << vreg << " of kind " << kind << " in method "
-                   << PrettyMethod(m);
-    return val;
-  }
-
   bool GetVRegPair(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
                    uint64_t* val) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  uint64_t GetVRegPair(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
-                       VRegKind kind_hi) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    uint64_t val;
-    bool success = GetVRegPair(m, vreg, kind_lo, kind_hi, &val);
-    CHECK(success) << "Failed to read vreg pair " << vreg
-                   << " of kind [" << kind_lo << "," << kind_hi << "] in method "
-                   << PrettyMethod(m);
-    return val;
-  }
-
   bool SetVReg(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -519,11 +500,12 @@
   uintptr_t* GetGPRAddress(uint32_t reg) const;
 
   // This is a fast-path for getting/setting values in a quick frame.
-  uint32_t* GetVRegAddr(StackReference<mirror::ArtMethod>* cur_quick_frame,
-                        const DexFile::CodeItem* code_item,
-                        uint32_t core_spills, uint32_t fp_spills, size_t frame_size,
-                        uint16_t vreg) const {
-    int offset = GetVRegOffset(code_item, core_spills, fp_spills, frame_size, vreg, kRuntimeISA);
+  uint32_t* GetVRegAddrFromQuickCode(StackReference<mirror::ArtMethod>* cur_quick_frame,
+                                     const DexFile::CodeItem* code_item,
+                                     uint32_t core_spills, uint32_t fp_spills, size_t frame_size,
+                                     uint16_t vreg) const {
+    int offset = GetVRegOffsetFromQuickCode(
+        code_item, core_spills, fp_spills, frame_size, vreg, kRuntimeISA);
     DCHECK_EQ(cur_quick_frame, GetCurrentQuickFrame());
     uint8_t* vreg_addr = reinterpret_cast<uint8_t*>(cur_quick_frame) + offset;
     return reinterpret_cast<uint32_t*>(vreg_addr);
@@ -582,9 +564,9 @@
    *     | StackReference<ArtMethod>     |  ... (reg == num_total_code_regs == special_temp_value) <<== sp, 16-byte aligned
    *     +===============================+
    */
-  static int GetVRegOffset(const DexFile::CodeItem* code_item,
-                           uint32_t core_spills, uint32_t fp_spills,
-                           size_t frame_size, int reg, InstructionSet isa) {
+  static int GetVRegOffsetFromQuickCode(const DexFile::CodeItem* code_item,
+                                        uint32_t core_spills, uint32_t fp_spills,
+                                        size_t frame_size, int reg, InstructionSet isa) {
     DCHECK_EQ(frame_size & (kStackAlignment - 1), 0U);
     DCHECK_NE(reg, -1);
     int spill_size = POPCOUNT(core_spills) * GetBytesPerGprSpillLocation(isa)
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index e88820f..8ebafc5 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -46,19 +46,19 @@
   explicit InlineInfo(MemoryRegion region) : region_(region) {}
 
   uint8_t GetDepth() const {
-    return region_.Load<uint8_t>(kDepthOffset);
+    return region_.LoadUnaligned<uint8_t>(kDepthOffset);
   }
 
   void SetDepth(uint8_t depth) {
-    region_.Store<uint8_t>(kDepthOffset, depth);
+    region_.StoreUnaligned<uint8_t>(kDepthOffset, depth);
   }
 
   uint32_t GetMethodReferenceIndexAtDepth(uint8_t depth) const {
-    return region_.Load<uint32_t>(kFixedSize + depth * SingleEntrySize());
+    return region_.LoadUnaligned<uint32_t>(kFixedSize + depth * SingleEntrySize());
   }
 
   void SetMethodReferenceIndexAtDepth(uint8_t depth, uint32_t index) {
-    region_.Store<uint32_t>(kFixedSize + depth * SingleEntrySize(), index);
+    region_.StoreUnaligned<uint32_t>(kFixedSize + depth * SingleEntrySize(), index);
   }
 
   static size_t SingleEntrySize() {
@@ -466,43 +466,43 @@
   explicit StackMap(MemoryRegion region) : region_(region) {}
 
   uint32_t GetDexPc() const {
-    return region_.Load<uint32_t>(kDexPcOffset);
+    return region_.LoadUnaligned<uint32_t>(kDexPcOffset);
   }
 
   void SetDexPc(uint32_t dex_pc) {
-    region_.Store<uint32_t>(kDexPcOffset, dex_pc);
+    region_.StoreUnaligned<uint32_t>(kDexPcOffset, dex_pc);
   }
 
   uint32_t GetNativePcOffset() const {
-    return region_.Load<uint32_t>(kNativePcOffsetOffset);
+    return region_.LoadUnaligned<uint32_t>(kNativePcOffsetOffset);
   }
 
   void SetNativePcOffset(uint32_t native_pc_offset) {
-    region_.Store<uint32_t>(kNativePcOffsetOffset, native_pc_offset);
+    region_.StoreUnaligned<uint32_t>(kNativePcOffsetOffset, native_pc_offset);
   }
 
   uint32_t GetDexRegisterMapOffset() const {
-    return region_.Load<uint32_t>(kDexRegisterMapOffsetOffset);
+    return region_.LoadUnaligned<uint32_t>(kDexRegisterMapOffsetOffset);
   }
 
   void SetDexRegisterMapOffset(uint32_t offset) {
-    region_.Store<uint32_t>(kDexRegisterMapOffsetOffset, offset);
+    region_.StoreUnaligned<uint32_t>(kDexRegisterMapOffsetOffset, offset);
   }
 
   uint32_t GetInlineDescriptorOffset() const {
-    return region_.Load<uint32_t>(kInlineDescriptorOffsetOffset);
+    return region_.LoadUnaligned<uint32_t>(kInlineDescriptorOffsetOffset);
   }
 
   void SetInlineDescriptorOffset(uint32_t offset) {
-    region_.Store<uint32_t>(kInlineDescriptorOffsetOffset, offset);
+    region_.StoreUnaligned<uint32_t>(kInlineDescriptorOffsetOffset, offset);
   }
 
   uint32_t GetRegisterMask() const {
-    return region_.Load<uint32_t>(kRegisterMaskOffset);
+    return region_.LoadUnaligned<uint32_t>(kRegisterMaskOffset);
   }
 
   void SetRegisterMask(uint32_t mask) {
-    region_.Store<uint32_t>(kRegisterMaskOffset, mask);
+    region_.StoreUnaligned<uint32_t>(kRegisterMaskOffset, mask);
   }
 
   MemoryRegion GetStackMask() const {
@@ -529,9 +529,8 @@
        && region_.size() == other.region_.size();
   }
 
-  static size_t ComputeAlignedStackMapSize(size_t stack_mask_size) {
-    // On ARM, the stack maps must be 4-byte aligned.
-    return RoundUp(StackMap::kFixedSize + stack_mask_size, kWordAlignment);
+  static size_t ComputeStackMapSize(size_t stack_mask_size) {
+    return StackMap::kFixedSize + stack_mask_size;
   }
 
   // Special (invalid) offset for the DexRegisterMapOffset field meaning
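Because ComputeStackMapSize no longer rounds each entry up to kWordAlignment, a stack map's 32-bit fields may start at any byte offset, which is why the accessors above switch from Load/Store to LoadUnaligned/StoreUnaligned. A minimal, self-contained sketch of a memcpy-based unaligned 32-bit access (illustrative only; MemoryRegion has its own implementation):

#include <cstddef>
#include <cstdint>
#include <cstring>

// memcpy is the portable way to read or write a 32-bit value at an arbitrary
// byte offset without violating alignment rules on strict-alignment ISAs such as ARM.
inline uint32_t LoadUnaligned32(const uint8_t* base, size_t offset) {
  uint32_t value;
  std::memcpy(&value, base + offset, sizeof(value));
  return value;
}

inline void StoreUnaligned32(uint8_t* base, size_t offset, uint32_t value) {
  std::memcpy(base + offset, &value, sizeof(value));
}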
@@ -606,8 +605,15 @@
     region_.Store<uint32_t>(kNumberOfStackMapsOffset, number_of_stack_maps);
   }
 
+  // Get the size of one stack map of this CodeInfo object, in bytes.
+  // All stack maps of a CodeInfo have the same size.
   size_t StackMapSize() const {
-    return StackMap::ComputeAlignedStackMapSize(GetStackMaskSize());
+    return StackMap::ComputeStackMapSize(GetStackMaskSize());
+  }
+
+  // Get the size of all the stack maps of this CodeInfo object, in bytes.
+  size_t StackMapsSize() const {
+    return StackMapSize() * GetNumberOfStackMaps();
   }
 
   uint32_t GetStackMapsOffset() const {
@@ -663,7 +669,7 @@
   MemoryRegion GetStackMaps() const {
     return region_.size() == 0
         ? MemoryRegion()
-        : region_.Subregion(kFixedSize, StackMapSize() * GetNumberOfStackMaps());
+        : region_.Subregion(kFixedSize, StackMapsSize());
   }
 
   // Compute the size of a Dex register map starting at offset `origin` in
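With the per-entry padding gone, locating a stack map is plain arithmetic over equally sized entries, as StackMapSize, StackMapsSize, and GetStackMaps above imply. A hedged, self-contained sketch of that layout math (illustrative names, not ART's API):

#include <cstddef>

// Illustrative layout math for a CodeInfo-like table of equally sized entries.
struct StackMapTableLayout {
  size_t fixed_header_size;      // plays the role of CodeInfo::kFixedSize
  size_t stack_map_size;         // StackMap::kFixedSize + stack mask size
  size_t number_of_stack_maps;

  // Byte offset of the i-th stack map from the start of the CodeInfo region.
  size_t OffsetOfStackMap(size_t i) const {
    return fixed_header_size + i * stack_map_size;
  }

  // Total bytes occupied by all stack maps (the subregion GetStackMaps covers).
  size_t TotalStackMapsSize() const {
    return stack_map_size * number_of_stack_maps;
  }
};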
diff --git a/runtime/thread.cc b/runtime/thread.cc
index e8e9355..8e98d53 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2216,9 +2216,9 @@
                 }
               } else {
                 StackReference<mirror::Object>* ref_addr =
-                    reinterpret_cast<StackReference<mirror::Object>*>(
-                        GetVRegAddr(cur_quick_frame, code_item, frame_info.CoreSpillMask(),
-                                    frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), reg));
+                    reinterpret_cast<StackReference<mirror::Object>*>(GetVRegAddrFromQuickCode(
+                        cur_quick_frame, code_item, frame_info.CoreSpillMask(),
+                        frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), reg));
                 mirror::Object* ref = ref_addr->AsMirrorPtr();
                 if (ref != nullptr) {
                   mirror::Object* new_ref = ref;
diff --git a/runtime/utils.h b/runtime/utils.h
index cd04c3f..9a9f51a 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -333,7 +333,7 @@
 // Tests whether 's' starts with 'prefix'.
 bool StartsWith(const std::string& s, const char* prefix);
 
-// Tests whether 's' starts with 'suffix'.
+// Tests whether 's' ends with 'suffix'.
 bool EndsWith(const std::string& s, const char* suffix);
 
 // Used to implement PrettyClass, PrettyField, PrettyMethod, and PrettyTypeOf,
diff --git a/test/100-reflect2/expected.txt b/test/100-reflect2/expected.txt
index 8fdeccc..1f8df1d 100644
--- a/test/100-reflect2/expected.txt
+++ b/test/100-reflect2/expected.txt
@@ -33,7 +33,7 @@
 14 (class java.lang.Short)
 [public java.lang.String(), java.lang.String(int,int,char[]), public java.lang.String(java.lang.String), public java.lang.String(java.lang.StringBuffer), public java.lang.String(java.lang.StringBuilder), public java.lang.String(byte[]), public java.lang.String(byte[],int), public java.lang.String(byte[],int,int), public java.lang.String(byte[],int,int,int), public java.lang.String(byte[],int,int,java.lang.String) throws java.io.UnsupportedEncodingException, public java.lang.String(byte[],int,int,java.nio.charset.Charset), public java.lang.String(byte[],java.lang.String) throws java.io.UnsupportedEncodingException, public java.lang.String(byte[],java.nio.charset.Charset), public java.lang.String(char[]), public java.lang.String(char[],int,int), public java.lang.String(int[],int,int)]
 [private final int java.lang.String.count, private int java.lang.String.hashCode, private final int java.lang.String.offset, private final char[] java.lang.String.value, private static final char[] java.lang.String.ASCII, public static final java.util.Comparator java.lang.String.CASE_INSENSITIVE_ORDER, private static final char java.lang.String.REPLACEMENT_CHAR, private static final long java.lang.String.serialVersionUID]
-[void java.lang.String._getChars(int,int,char[],int), public char java.lang.String.charAt(int), public int java.lang.String.codePointAt(int), public int java.lang.String.codePointBefore(int), public int java.lang.String.codePointCount(int,int), public volatile int java.lang.String.compareTo(java.lang.Object), public native int java.lang.String.compareTo(java.lang.String), public int java.lang.String.compareToIgnoreCase(java.lang.String), public java.lang.String java.lang.String.concat(java.lang.String), public boolean java.lang.String.contains(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.StringBuffer), public boolean java.lang.String.endsWith(java.lang.String), public boolean java.lang.String.equals(java.lang.Object), public boolean java.lang.String.equalsIgnoreCase(java.lang.String), public void java.lang.String.getBytes(int,int,byte[],int), public [B java.lang.String.getBytes(), public [B java.lang.String.getBytes(java.lang.String) throws java.io.UnsupportedEncodingException, public [B java.lang.String.getBytes(java.nio.charset.Charset), public void java.lang.String.getChars(int,int,char[],int), public int java.lang.String.hashCode(), public int java.lang.String.indexOf(int), public int java.lang.String.indexOf(int,int), public int java.lang.String.indexOf(java.lang.String), public int java.lang.String.indexOf(java.lang.String,int), public native java.lang.String java.lang.String.intern(), public boolean java.lang.String.isEmpty(), public int java.lang.String.lastIndexOf(int), public int java.lang.String.lastIndexOf(int,int), public int java.lang.String.lastIndexOf(java.lang.String), public int java.lang.String.lastIndexOf(java.lang.String,int), public int java.lang.String.length(), public boolean java.lang.String.matches(java.lang.String), public int java.lang.String.offsetByCodePoints(int,int), public boolean java.lang.String.regionMatches(int,java.lang.String,int,int), public boolean java.lang.String.regionMatches(boolean,int,java.lang.String,int,int), public java.lang.String java.lang.String.replace(char,char), public java.lang.String java.lang.String.replace(java.lang.CharSequence,java.lang.CharSequence), public java.lang.String java.lang.String.replaceAll(java.lang.String,java.lang.String), public java.lang.String java.lang.String.replaceFirst(java.lang.String,java.lang.String), public [Ljava.lang.String; java.lang.String.split(java.lang.String), public [Ljava.lang.String; java.lang.String.split(java.lang.String,int), public boolean java.lang.String.startsWith(java.lang.String), public boolean java.lang.String.startsWith(java.lang.String,int), public java.lang.CharSequence java.lang.String.subSequence(int,int), public java.lang.String java.lang.String.substring(int), public java.lang.String java.lang.String.substring(int,int), public [C java.lang.String.toCharArray(), public java.lang.String java.lang.String.toLowerCase(), public java.lang.String java.lang.String.toLowerCase(java.util.Locale), public java.lang.String java.lang.String.toString(), public java.lang.String java.lang.String.toUpperCase(), public java.lang.String java.lang.String.toUpperCase(java.util.Locale), public java.lang.String java.lang.String.trim(), public static java.lang.String java.lang.String.copyValueOf(char[]), public static java.lang.String java.lang.String.copyValueOf(char[],int,int), private java.lang.StringIndexOutOfBoundsException java.lang.String.failedBoundsCheck(int,int,int), private native 
int java.lang.String.fastIndexOf(int,int), private char java.lang.String.foldCase(char), public static transient java.lang.String java.lang.String.format(java.lang.String,java.lang.Object[]), public static transient java.lang.String java.lang.String.format(java.util.Locale,java.lang.String,java.lang.Object[]), private java.lang.StringIndexOutOfBoundsException java.lang.String.indexAndLength(int), private static int java.lang.String.indexOf(java.lang.String,java.lang.String,int,int,char), private int java.lang.String.indexOfSupplementary(int,int), private int java.lang.String.lastIndexOfSupplementary(int,int), private java.lang.StringIndexOutOfBoundsException java.lang.String.startEndAndLength(int,int), public static java.lang.String java.lang.String.valueOf(char), public static java.lang.String java.lang.String.valueOf(double), public static java.lang.String java.lang.String.valueOf(float), public static java.lang.String java.lang.String.valueOf(int), public static java.lang.String java.lang.String.valueOf(long), public static java.lang.String java.lang.String.valueOf(java.lang.Object), public static java.lang.String java.lang.String.valueOf(boolean), public static java.lang.String java.lang.String.valueOf(char[]), public static java.lang.String java.lang.String.valueOf(char[],int,int)]
+[void java.lang.String._getChars(int,int,char[],int), public char java.lang.String.charAt(int), public int java.lang.String.codePointAt(int), public int java.lang.String.codePointBefore(int), public int java.lang.String.codePointCount(int,int), public int java.lang.String.compareTo(java.lang.Object), public native int java.lang.String.compareTo(java.lang.String), public int java.lang.String.compareToIgnoreCase(java.lang.String), public java.lang.String java.lang.String.concat(java.lang.String), public boolean java.lang.String.contains(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.StringBuffer), public boolean java.lang.String.endsWith(java.lang.String), public boolean java.lang.String.equals(java.lang.Object), public boolean java.lang.String.equalsIgnoreCase(java.lang.String), public void java.lang.String.getBytes(int,int,byte[],int), public [B java.lang.String.getBytes(), public [B java.lang.String.getBytes(java.lang.String) throws java.io.UnsupportedEncodingException, public [B java.lang.String.getBytes(java.nio.charset.Charset), public void java.lang.String.getChars(int,int,char[],int), public int java.lang.String.hashCode(), public int java.lang.String.indexOf(int), public int java.lang.String.indexOf(int,int), public int java.lang.String.indexOf(java.lang.String), public int java.lang.String.indexOf(java.lang.String,int), public native java.lang.String java.lang.String.intern(), public boolean java.lang.String.isEmpty(), public int java.lang.String.lastIndexOf(int), public int java.lang.String.lastIndexOf(int,int), public int java.lang.String.lastIndexOf(java.lang.String), public int java.lang.String.lastIndexOf(java.lang.String,int), public int java.lang.String.length(), public boolean java.lang.String.matches(java.lang.String), public int java.lang.String.offsetByCodePoints(int,int), public boolean java.lang.String.regionMatches(int,java.lang.String,int,int), public boolean java.lang.String.regionMatches(boolean,int,java.lang.String,int,int), public java.lang.String java.lang.String.replace(char,char), public java.lang.String java.lang.String.replace(java.lang.CharSequence,java.lang.CharSequence), public java.lang.String java.lang.String.replaceAll(java.lang.String,java.lang.String), public java.lang.String java.lang.String.replaceFirst(java.lang.String,java.lang.String), public [Ljava.lang.String; java.lang.String.split(java.lang.String), public [Ljava.lang.String; java.lang.String.split(java.lang.String,int), public boolean java.lang.String.startsWith(java.lang.String), public boolean java.lang.String.startsWith(java.lang.String,int), public java.lang.CharSequence java.lang.String.subSequence(int,int), public java.lang.String java.lang.String.substring(int), public java.lang.String java.lang.String.substring(int,int), public [C java.lang.String.toCharArray(), public java.lang.String java.lang.String.toLowerCase(), public java.lang.String java.lang.String.toLowerCase(java.util.Locale), public java.lang.String java.lang.String.toString(), public java.lang.String java.lang.String.toUpperCase(), public java.lang.String java.lang.String.toUpperCase(java.util.Locale), public java.lang.String java.lang.String.trim(), public static java.lang.String java.lang.String.copyValueOf(char[]), public static java.lang.String java.lang.String.copyValueOf(char[],int,int), private java.lang.StringIndexOutOfBoundsException java.lang.String.failedBoundsCheck(int,int,int), private native int 
java.lang.String.fastIndexOf(int,int), private char java.lang.String.foldCase(char), public static java.lang.String java.lang.String.format(java.lang.String,java.lang.Object[]), public static java.lang.String java.lang.String.format(java.util.Locale,java.lang.String,java.lang.Object[]), private java.lang.StringIndexOutOfBoundsException java.lang.String.indexAndLength(int), private static int java.lang.String.indexOf(java.lang.String,java.lang.String,int,int,char), private int java.lang.String.indexOfSupplementary(int,int), private int java.lang.String.lastIndexOfSupplementary(int,int), private java.lang.StringIndexOutOfBoundsException java.lang.String.startEndAndLength(int,int), public static java.lang.String java.lang.String.valueOf(char), public static java.lang.String java.lang.String.valueOf(double), public static java.lang.String java.lang.String.valueOf(float), public static java.lang.String java.lang.String.valueOf(int), public static java.lang.String java.lang.String.valueOf(long), public static java.lang.String java.lang.String.valueOf(java.lang.Object), public static java.lang.String java.lang.String.valueOf(boolean), public static java.lang.String java.lang.String.valueOf(char[]), public static java.lang.String java.lang.String.valueOf(char[],int,int)]
 []
 [interface java.io.Serializable, interface java.lang.Comparable, interface java.lang.CharSequence]
 0
diff --git a/test/461-get-reference-vreg/expected.txt b/test/461-get-reference-vreg/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/461-get-reference-vreg/expected.txt
diff --git a/test/461-get-reference-vreg/get_reference_vreg_jni.cc b/test/461-get-reference-vreg/get_reference_vreg_jni.cc
new file mode 100644
index 0000000..f0b78e1
--- /dev/null
+++ b/test/461-get-reference-vreg/get_reference_vreg_jni.cc
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "arch/context.h"
+#include "jni.h"
+#include "mirror/art_method-inl.h"
+#include "scoped_thread_state_change.h"
+#include "stack.h"
+#include "thread.h"
+
+namespace art {
+
+namespace {
+
+class TestVisitor : public StackVisitor {
+ public:
+  TestVisitor(Thread* thread, Context* context, mirror::Object* this_value)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : StackVisitor(thread, context), this_value_(this_value), found_method_index_(0) {}
+
+  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    mirror::ArtMethod* m = GetMethod();
+    std::string m_name(m->GetName());
+
+    if (m_name.compare("testThisWithInstanceCall") == 0) {
+      found_method_index_ = 1;
+      uint32_t value = 0;
+      CHECK(GetVReg(m, 1, kReferenceVReg, &value));
+      CHECK_EQ(reinterpret_cast<mirror::Object*>(value), this_value_);
+      CHECK_EQ(GetThisObject(), this_value_);
+    } else if (m_name.compare("testThisWithStaticCall") == 0) {
+      found_method_index_ = 2;
+      uint32_t value = 0;
+      CHECK(GetVReg(m, 1, kReferenceVReg, &value));
+    } else if (m_name.compare("testParameter") == 0) {
+      found_method_index_ = 3;
+      uint32_t value = 0;
+      CHECK(GetVReg(m, 1, kReferenceVReg, &value));
+    } else if (m_name.compare("testObjectInScope") == 0) {
+      found_method_index_ = 4;
+      uint32_t value = 0;
+      CHECK(GetVReg(m, 0, kReferenceVReg, &value));
+    }
+
+    return true;
+  }
+
+  mirror::Object* this_value_;
+
+  // Value returned to Java to ensure the methods testThisWithInstanceCall,
+  // testThisWithStaticCall, testParameter, and testObjectInScope have been found and tested.
+  jint found_method_index_;
+};
+
+extern "C" JNIEXPORT jint JNICALL Java_Main_doNativeCallRef(JNIEnv*, jobject value) {
+  ScopedObjectAccess soa(Thread::Current());
+  std::unique_ptr<Context> context(Context::Create());
+  TestVisitor visitor(soa.Self(), context.get(), soa.Decode<mirror::Object*>(value));
+  visitor.WalkStack();
+  return visitor.found_method_index_;
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_Main_doStaticNativeCallRef(JNIEnv*, jclass) {
+  ScopedObjectAccess soa(Thread::Current());
+  std::unique_ptr<Context> context(Context::Create());
+  TestVisitor visitor(soa.Self(), context.get(), nullptr);
+  visitor.WalkStack();
+  return visitor.found_method_index_;
+}
+
+}  // namespace
+
+}  // namespace art
diff --git a/test/461-get-reference-vreg/info.txt b/test/461-get-reference-vreg/info.txt
new file mode 100644
index 0000000..1e5e971
--- /dev/null
+++ b/test/461-get-reference-vreg/info.txt
@@ -0,0 +1 @@
+Tests for inspecting DEX registers holding references.
diff --git a/test/461-get-reference-vreg/src/Main.java b/test/461-get-reference-vreg/src/Main.java
new file mode 100644
index 0000000..a94c6fb
--- /dev/null
+++ b/test/461-get-reference-vreg/src/Main.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public Main() {
+  }
+
+  int testThisWithInstanceCall() {
+    return doNativeCallRef();
+  }
+
+  int testThisWithStaticCall() {
+    return doStaticNativeCallRef();
+  }
+
+  static int testParameter(Object a) {
+    return doStaticNativeCallRef();
+  }
+
+  static int testObjectInScope() {
+    Object a = array[0];
+    return doStaticNativeCallRef();
+  }
+
+  native int doNativeCallRef();
+  static native int doStaticNativeCallRef();
+
+  static {
+    System.loadLibrary("arttest");
+  }
+
+  public static void main(String[] args) {
+    Main rm = new Main();
+    if (rm.testThisWithInstanceCall() != 1) {
+      throw new Error("Expected 1");
+    }
+
+    if (rm.testThisWithStaticCall() != 2) {
+      throw new Error("Expected 2");
+    }
+
+    if (testParameter(new Object()) != 3) {
+      throw new Error("Expected 3");
+    }
+
+    if (testObjectInScope() != 4) {
+      throw new Error("Expected 4");
+    }
+  }
+
+  static Object[] array = new Object[] { new Object() };
+}
diff --git a/test/Android.libarttest.mk b/test/Android.libarttest.mk
index f4bab3f..0cafb06 100644
--- a/test/Android.libarttest.mk
+++ b/test/Android.libarttest.mk
@@ -30,7 +30,8 @@
   118-noimage-dex2oat/noimage-dex2oat.cc \
   454-get-vreg/get_vreg_jni.cc \
   455-set-vreg/set_vreg_jni.cc \
-  457-regs/regs_jni.cc
+  457-regs/regs_jni.cc \
+  461-get-reference-vreg/get_reference_vreg_jni.cc
 
 ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttest.so
 ifdef TARGET_2ND_ARCH
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index c666d35..f84e0fb 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -312,6 +312,7 @@
   454-get-vreg \
   455-set-vreg \
   457-regs \
+  461-get-reference-vreg \
 
 ifneq (,$(filter ndebug,$(RUN_TYPES)))
   ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),ndebug,$(PREBUILD_TYPES), \
@@ -367,6 +368,8 @@
 
 # Tests that should fail when the optimizing compiler compiles them non-debuggable.
 TEST_ART_BROKEN_OPTIMIZING_NONDEBUGGABLE_RUN_TESTS := \
+  454-get-vreg \
+  455-set-vreg \
   457-regs \
 
 ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
@@ -377,6 +380,20 @@
 
 TEST_ART_BROKEN_OPTIMIZING_NONDEBUGGABLE_RUN_TESTS :=
 
+# Tests that should fail when the optimizing compiler compiles them debuggable.
+TEST_ART_BROKEN_OPTIMIZING_DEBUGGABLE_RUN_TESTS := \
+  441-checker-inliner \
+  446-checker-inliner2 \
+  447-checker-inliner3 \
+
+ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
+  ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+      optimizing,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+      $(IMAGE_TYPES),$(PICTEST_TYPES),debuggable,$(TEST_ART_BROKEN_OPTIMIZING_DEBUGGABLE_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+endif
+
+TEST_ART_BROKEN_OPTIMIZING_DEBUGGABLE_RUN_TESTS :=
+
 
 # Clear variables ahead of appending to them when defining tests.
 $(foreach target, $(TARGET_TYPES), $(eval ART_RUN_TEST_$(call name-to-var,$(target))_RULES :=))