Merge "Infrastructure for obsolete methods"
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 9902628..1691dbb 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -84,6 +84,7 @@
 # Dex file dependencies for each gtest.
 ART_GTEST_dex2oat_environment_tests_DEX_DEPS := Main MainStripped MultiDex MultiDexModifiedSecondary Nested
 
+ART_GTEST_atomic_method_ref_map_test_DEX_DEPS := Interfaces
 ART_GTEST_class_linker_test_DEX_DEPS := Interfaces MethodTypes MultiDex MyClass Nested Statics StaticsFromCode
 ART_GTEST_compiler_driver_test_DEX_DEPS := AbstractMethod StaticLeafMethods ProfileTestMultiDex
 ART_GTEST_dex_cache_test_DEX_DEPS := Main Packages MethodTypes
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 1737376..db55ea0 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -53,6 +53,7 @@
         "optimizing/code_generator_utils.cc",
         "optimizing/constant_folding.cc",
         "optimizing/dead_code_elimination.cc",
+        "optimizing/escape.cc",
         "optimizing/graph_checker.cc",
         "optimizing/graph_visualizer.cc",
         "optimizing/gvn.cc",
@@ -348,6 +349,7 @@
         "optimizing/ssa_test.cc",
         "optimizing/stack_map_test.cc",
         "optimizing/suspend_check_test.cc",
+        "utils/atomic_method_ref_map_test.cc",
         "utils/dedupe_set_test.cc",
         "utils/intrusive_forward_list_test.cc",
         "utils/string_reference_test.cc",
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index 174e85e..bbf9eee 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -315,11 +315,11 @@
     return target_dex_file_;
   }
 
-  uint32_t TargetStringIndex() const {
+  dex::StringIndex TargetStringIndex() const {
     DCHECK(patch_type_ == Type::kString ||
            patch_type_ == Type::kStringRelative ||
            patch_type_ == Type::kStringBssEntry);
-    return string_idx_;
+    return dex::StringIndex(string_idx_);
   }
 
   const DexFile* TargetDexCacheDexFile() const {
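
The TargetStringIndex() change above is the leading edge of a wider migration from raw uint32_t string indices to a strong index type. The type's definition is not part of this diff (it lives in runtime/dex_file_types.h); a minimal sketch consistent with how dex::StringIndex is used throughout this change (explicit construction from uint32_t, a public index_ field, ordering so it can serve as a map key) might look like:

#include <cstdint>
#include <limits>

namespace art {
namespace dex {

// Sketch only: the real type also comes with companion wrappers (e.g.
// dex::TypeIndex) and a std::hash specialization.
class StringIndex {
 public:
  uint32_t index_;

  constexpr StringIndex() : index_(std::numeric_limits<uint32_t>::max()) {}
  explicit constexpr StringIndex(uint32_t index) : index_(index) {}

  bool IsValid() const { return index_ != std::numeric_limits<uint32_t>::max(); }

  bool operator==(const StringIndex& other) const { return index_ == other.index_; }
  bool operator<(const StringIndex& other) const { return index_ < other.index_; }
};

}  // namespace dex
}  // namespace art

The explicit constructor makes it a compile-time error to pass a bare integer (or a type index) where a string index is expected; call sites below unwrap via .index_ only at boundaries that still traffic in raw values, such as linker patches and runtime entrypoints.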
diff --git a/compiler/debug/elf_debug_info_writer.h b/compiler/debug/elf_debug_info_writer.h
index 0a4f094..30d4b47 100644
--- a/compiler/debug/elf_debug_info_writer.h
+++ b/compiler/debug/elf_debug_info_writer.h
@@ -53,7 +53,7 @@
       uint32_t parameters_size = DecodeUnsignedLeb128(&stream);
       for (uint32_t i = 0; i < parameters_size; ++i) {
         uint32_t id = DecodeUnsignedLeb128P1(&stream);
-        names.push_back(mi->dex_file->StringDataByIdx(id));
+        names.push_back(mi->dex_file->StringDataByIdx(dex::StringIndex(id)));
       }
     }
   }
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index 3fb10d8..669d8cd 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -23,6 +23,7 @@
 #include "driver/compiler_options.h"
 #include "thread.h"
 #include "thread-inl.h"
+#include "utils/atomic_method_ref_map-inl.h"
 #include "verified_method.h"
 #include "verifier/method_verifier-inl.h"
 
@@ -35,8 +36,11 @@
 
 VerificationResults::~VerificationResults() {
   WriterMutexLock mu(Thread::Current(), verified_methods_lock_);
-  DeleteResults(preregistered_dex_files_);
   STLDeleteValues(&verified_methods_);
+  atomic_verified_methods_.Visit([](const MethodReference& ref ATTRIBUTE_UNUSED,
+                                    const VerifiedMethod* method) {
+    delete method;
+  });
 }
 
 void VerificationResults::ProcessVerifiedMethod(verifier::MethodVerifier* method_verifier) {
@@ -49,16 +53,17 @@
     // We'll punt this later.
     return;
   }
-  bool inserted;
-  DexFileMethodArray* const array = GetMethodArray(ref.dex_file);
+  AtomicMap::InsertResult result = atomic_verified_methods_.Insert(ref,
+                                                                   /*expected*/ nullptr,
+                                                                   verified_method.get());
   const VerifiedMethod* existing = nullptr;
-  if (array != nullptr) {
-    DCHECK(array != nullptr);
-    Atomic<const VerifiedMethod*>* slot = &(*array)[ref.dex_method_index];
-    inserted = slot->CompareExchangeStrongSequentiallyConsistent(nullptr, verified_method.get());
+  bool inserted;
+  if (result != AtomicMap::kInsertResultInvalidDexFile) {
+    inserted = (result == AtomicMap::kInsertResultSuccess);
     if (!inserted) {
-      existing = slot->LoadSequentiallyConsistent();
-      DCHECK_NE(verified_method.get(), existing);
+      // Rare case.
+      CHECK(atomic_verified_methods_.Get(ref, &existing));
+      CHECK_NE(verified_method.get(), existing);
     }
   } else {
     WriterMutexLock mu(Thread::Current(), verified_methods_lock_);
@@ -89,9 +94,9 @@
 }
 
 const VerifiedMethod* VerificationResults::GetVerifiedMethod(MethodReference ref) {
-  DexFileMethodArray* array = GetMethodArray(ref.dex_file);
-  if (array != nullptr) {
-    return (*array)[ref.dex_method_index].LoadRelaxed();
+  const VerifiedMethod* ret = nullptr;
+  if (atomic_verified_methods_.Get(ref, &ret)) {
+    return ret;
   }
   ReaderMutexLock mu(Thread::Current(), verified_methods_lock_);
   auto it = verified_methods_.find(ref);
@@ -124,10 +129,8 @@
   return true;
 }
 
-void VerificationResults::PreRegisterDexFile(const DexFile* dex_file) {
-  CHECK(preregistered_dex_files_.find(dex_file) == preregistered_dex_files_.end())
-      << dex_file->GetLocation();
-  DexFileMethodArray array(dex_file->NumMethodIds());
+void VerificationResults::AddDexFile(const DexFile* dex_file) {
+  atomic_verified_methods_.AddDexFile(dex_file);
   WriterMutexLock mu(Thread::Current(), verified_methods_lock_);
   // There can be some verified methods that are already registered for the dex_file since we set
   // up well known classes earlier. Remove these and put them in the array so that we don't
@@ -135,31 +138,13 @@
   for (auto it = verified_methods_.begin(); it != verified_methods_.end(); ) {
     MethodReference ref = it->first;
     if (ref.dex_file == dex_file) {
-      array[ref.dex_method_index].StoreSequentiallyConsistent(it->second);
+      CHECK(atomic_verified_methods_.Insert(ref, nullptr, it->second) ==
+          AtomicMap::kInsertResultSuccess);
       it = verified_methods_.erase(it);
     } else {
       ++it;
     }
   }
-  preregistered_dex_files_.emplace(dex_file, std::move(array));
-}
-
-void VerificationResults::DeleteResults(DexFileResults& array) {
-  for (auto& pair : array) {
-    for (Atomic<const VerifiedMethod*>& method : pair.second) {
-      delete method.LoadSequentiallyConsistent();
-    }
-  }
-  array.clear();
-}
-
-VerificationResults::DexFileMethodArray* VerificationResults::GetMethodArray(
-    const DexFile* dex_file) {
-  auto it = preregistered_dex_files_.find(dex_file);
-  if (it != preregistered_dex_files_.end()) {
-    return &it->second;
-  }
-  return nullptr;
 }
 
 }  // namespace art
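
AtomicMethodRefMap itself (compiler/utils/atomic_method_ref_map.h, added elsewhere in this change) is not shown above. A self-contained sketch of the interface inferred from its usage here — AddDexFile() pre-sizing one atomic slot per method, Insert() as a compare-and-swap, Get() as a lock-free load, Visit() for cleanup — using standard containers in place of ART's Atomic<> and SafeMap:

#include <atomic>
#include <cstdint>
#include <map>
#include <vector>

// Hypothetical stand-ins for the ART types, for illustration only.
struct DexFile {
  uint32_t num_method_ids;
  uint32_t NumMethodIds() const { return num_method_ids; }
};
struct MethodReference {
  const DexFile* dex_file;
  uint32_t dex_method_index;
};

template <typename T>
class AtomicMethodRefMap {
 public:
  enum InsertResult {
    kInsertResultInvalidDexFile,  // Dex file was never added.
    kInsertResultCASFailure,      // Slot already held a different value.
    kInsertResultSuccess,
  };

  // Must be called before concurrent Insert()/Get() for this dex file; the
  // per-method array is fixed in size so later lookups need no lock.
  void AddDexFile(const DexFile* dex_file) {
    arrays_.emplace(dex_file, std::vector<std::atomic<T>>(dex_file->NumMethodIds()));
  }

  InsertResult Insert(MethodReference ref, T expected, T desired) {
    auto it = arrays_.find(ref.dex_file);
    if (it == arrays_.end()) {
      return kInsertResultInvalidDexFile;
    }
    return it->second[ref.dex_method_index].compare_exchange_strong(expected, desired)
        ? kInsertResultSuccess
        : kInsertResultCASFailure;
  }

  bool Get(MethodReference ref, T* out) const {
    auto it = arrays_.find(ref.dex_file);
    if (it == arrays_.end()) {
      return false;
    }
    *out = it->second[ref.dex_method_index].load();
    return true;
  }

  template <typename Visitor>
  void Visit(const Visitor& visitor) {
    for (auto& pair : arrays_) {
      for (size_t i = 0; i < pair.second.size(); ++i) {
        visitor(MethodReference{pair.first, static_cast<uint32_t>(i)},
                pair.second[i].load());
      }
    }
  }

 private:
  std::map<const DexFile*, std::vector<std::atomic<T>>> arrays_;
};

This matches the two-tier scheme in VerificationResults: methods verified before their dex file is registered land in the locked verified_methods_ map, and AddDexFile() migrates them into the lock-free array.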
diff --git a/compiler/dex/verification_results.h b/compiler/dex/verification_results.h
index b3356e0..ea38f4d 100644
--- a/compiler/dex/verification_results.h
+++ b/compiler/dex/verification_results.h
@@ -26,6 +26,7 @@
 #include "class_reference.h"
 #include "method_reference.h"
 #include "safe_map.h"
+#include "utils/atomic_method_ref_map.h"
 
 namespace art {
 
@@ -54,26 +55,22 @@
 
   bool IsCandidateForCompilation(MethodReference& method_ref, const uint32_t access_flags);
 
-  // Add a dex file array to the preregistered_dex_files_ array. These dex files require no locks to
-  // access. It is not safe to call if other callers are calling GetVerifiedMethod concurrently.
-  void PreRegisterDexFile(const DexFile* dex_file) REQUIRES(!verified_methods_lock_);
+  // Add a dex file to enable using the atomic map.
+  void AddDexFile(const DexFile* dex_file) REQUIRES(!verified_methods_lock_);
 
  private:
   // Verified methods. The method array is fixed to avoid needing a lock to extend it.
-  using DexFileMethodArray = dchecked_vector<Atomic<const VerifiedMethod*>>;
-  using DexFileResults = std::map<const DexFile*, DexFileMethodArray>;
+  using AtomicMap = AtomicMethodRefMap<const VerifiedMethod*>;
   using VerifiedMethodMap = SafeMap<MethodReference,
                                     const VerifiedMethod*,
                                     MethodReferenceComparator>;
 
-  static void DeleteResults(DexFileResults& array);
-
-  DexFileMethodArray* GetMethodArray(const DexFile* dex_file) REQUIRES(!verified_methods_lock_);
   VerifiedMethodMap verified_methods_ GUARDED_BY(verified_methods_lock_);
   const CompilerOptions* const compiler_options_;
 
-  // Dex2oat can preregister dex files to avoid locking when calling GetVerifiedMethod.
-  DexFileResults preregistered_dex_files_;
+  // Dex2oat can add dex files to atomic_verified_methods_ to avoid locking when calling
+  // GetVerifiedMethod.
+  AtomicMap atomic_verified_methods_;
 
   ReaderWriterMutex verified_methods_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
 
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index ad75ec4..42e5db3 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -842,9 +842,9 @@
     switch (inst->Opcode()) {
       case Instruction::CONST_STRING:
       case Instruction::CONST_STRING_JUMBO: {
-        uint32_t string_index = (inst->Opcode() == Instruction::CONST_STRING)
+        dex::StringIndex string_index((inst->Opcode() == Instruction::CONST_STRING)
             ? inst->VRegB_21c()
-            : inst->VRegB_31c();
+            : inst->VRegB_31c());
         mirror::String* string = class_linker->ResolveString(dex_file, string_index, dex_cache);
         CHECK(string != nullptr) << "Could not allocate a string when forcing determinism";
         break;
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index f40c712..12684c0 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -111,7 +111,7 @@
   ObjPtr<mirror::DexCache> dex_cache = class_linker_->FindDexCache(soa.Self(), dex);
   EXPECT_EQ(dex.NumStringIds(), dex_cache->NumStrings());
   for (size_t i = 0; i < dex_cache->NumStrings(); i++) {
-    const mirror::String* string = dex_cache->GetResolvedString(i);
+    const mirror::String* string = dex_cache->GetResolvedString(dex::StringIndex(i));
     EXPECT_TRUE(string != nullptr) << "string_idx=" << i;
   }
   EXPECT_EQ(dex.NumTypeIds(), dex_cache->NumResolvedTypes());
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index a706697..fb5560b 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -1451,7 +1451,8 @@
     InternTable* const intern_table = runtime->GetInternTable();
     for (size_t i = 0, count = dex_file->NumStringIds(); i < count; ++i) {
       uint32_t utf16_length;
-      const char* utf8_data = dex_file->StringDataAndUtf16LengthByIdx(i, &utf16_length);
+      const char* utf8_data = dex_file->StringDataAndUtf16LengthByIdx(dex::StringIndex(i),
+                                                                      &utf16_length);
       mirror::String* string = intern_table->LookupStrong(self, utf16_length, utf8_data).Ptr();
       TryAssignBinSlot(work_stack, string, oat_index);
     }
diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h
index 0151789..233daf4 100644
--- a/compiler/linker/relative_patcher_test.h
+++ b/compiler/linker/relative_patcher_test.h
@@ -163,7 +163,8 @@
                                                offset + patch.LiteralOffset(),
                                                target_offset);
           } else if (patch.GetType() == LinkerPatch::Type::kStringRelative) {
-            uint32_t target_offset = string_index_to_offset_map_.Get(patch.TargetStringIndex());
+            uint32_t target_offset =
+                string_index_to_offset_map_.Get(patch.TargetStringIndex().index_);
             patcher_->PatchPcRelativeReference(&patched_code_,
                                                patch,
                                                offset + patch.LiteralOffset(),
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 8a6b94e..1f59816 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -430,7 +430,7 @@
     LocationSummary* locations = instruction_->GetLocations();
     DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
     HLoadString* load = instruction_->AsLoadString();
-    const uint32_t string_index = load->GetStringIndex();
+    const uint32_t string_index = load->GetStringIndex().index_;
     Register out = locations->Out().AsRegister<Register>();
     Register temp = locations->GetTemp(0).AsRegister<Register>();
     constexpr bool call_saves_everything_except_r0 = (!kUseReadBarrier || kUseBakerReadBarrier);
@@ -5946,7 +5946,7 @@
     case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
       DCHECK(codegen_->GetCompilerOptions().IsBootImage());
       CodeGeneratorARM::PcRelativePatchInfo* labels =
-          codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
+          codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
       __ BindTrackedLabel(&labels->movw_label);
       __ movw(out, /* placeholder */ 0u);
       __ BindTrackedLabel(&labels->movt_label);
@@ -5965,7 +5965,7 @@
       DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
       Register temp = locations->GetTemp(0).AsRegister<Register>();
       CodeGeneratorARM::PcRelativePatchInfo* labels =
-          codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
+          codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
       __ BindTrackedLabel(&labels->movw_label);
       __ movw(temp, /* placeholder */ 0u);
       __ BindTrackedLabel(&labels->movt_label);
@@ -5994,7 +5994,7 @@
   DCHECK(load_kind == HLoadString::LoadKind::kDexCacheViaMethod);
   InvokeRuntimeCallingConvention calling_convention;
   DCHECK_EQ(calling_convention.GetRegisterAt(0), out);
-  __ LoadImmediate(calling_convention.GetRegisterAt(0), load->GetStringIndex());
+  __ LoadImmediate(calling_convention.GetRegisterAt(0), load->GetStringIndex().index_);
   codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
   CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
 }
@@ -7340,7 +7340,7 @@
 }
 
 Literal* CodeGeneratorARM::DeduplicateBootImageStringLiteral(const DexFile& dex_file,
-                                                             uint32_t string_index) {
+                                                             dex::StringIndex string_index) {
   return boot_image_string_patches_.GetOrCreate(
       StringReference(&dex_file, string_index),
       [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
@@ -7364,7 +7364,7 @@
 }
 
 Literal* CodeGeneratorARM::DeduplicateJitStringLiteral(const DexFile& dex_file,
-                                                       uint32_t string_index) {
+                                                       dex::StringIndex string_index) {
   jit_string_roots_.Overwrite(StringReference(&dex_file, string_index), /* placeholder */ 0u);
   return jit_string_patches_.GetOrCreate(
       StringReference(&dex_file, string_index),
@@ -7436,7 +7436,7 @@
     uint32_t literal_offset = literal->GetLabel()->Position();
     linker_patches->push_back(LinkerPatch::StringPatch(literal_offset,
                                                        target_string.dex_file,
-                                                       target_string.string_index));
+                                                       target_string.string_index.index_));
   }
   if (!GetCompilerOptions().IsBootImage()) {
     EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index a4ccb57..8230512 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -485,11 +485,12 @@
   PcRelativePatchInfo* NewPcRelativeTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
   PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
                                                        uint32_t element_offset);
-  Literal* DeduplicateBootImageStringLiteral(const DexFile& dex_file, uint32_t string_index);
+  Literal* DeduplicateBootImageStringLiteral(const DexFile& dex_file,
+                                             dex::StringIndex string_index);
   Literal* DeduplicateBootImageTypeLiteral(const DexFile& dex_file, dex::TypeIndex type_index);
   Literal* DeduplicateBootImageAddressLiteral(uint32_t address);
   Literal* DeduplicateDexCacheAddressLiteral(uint32_t address);
-  Literal* DeduplicateJitStringLiteral(const DexFile& dex_file, uint32_t string_index);
+  Literal* DeduplicateJitStringLiteral(const DexFile& dex_file, dex::StringIndex string_index);
 
   void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
 
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index a78b3da..ab6a33f 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -349,7 +349,7 @@
     SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
-    const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
+    const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex().index_;
     __ Mov(calling_convention.GetRegisterAt(0).W(), string_index);
     arm64_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
     CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
@@ -4132,7 +4132,7 @@
 }
 
 vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateBootImageStringLiteral(
-    const DexFile& dex_file, uint32_t string_index) {
+    const DexFile& dex_file, dex::StringIndex string_index) {
   return boot_image_string_patches_.GetOrCreate(
       StringReference(&dex_file, string_index),
       [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
@@ -4158,7 +4158,7 @@
 }
 
 vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitStringLiteral(
-    const DexFile& dex_file, uint32_t string_index) {
+    const DexFile& dex_file, dex::StringIndex string_index) {
   jit_string_roots_.Overwrite(StringReference(&dex_file, string_index), /* placeholder */ 0u);
   return jit_string_patches_.GetOrCreate(
       StringReference(&dex_file, string_index),
@@ -4246,7 +4246,7 @@
     vixl::aarch64::Literal<uint32_t>* literal = entry.second;
     linker_patches->push_back(LinkerPatch::StringPatch(literal->GetOffset(),
                                                        target_string.dex_file,
-                                                       target_string.string_index));
+                                                       target_string.string_index.index_));
   }
   if (!GetCompilerOptions().IsBootImage()) {
     EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
@@ -4594,7 +4594,7 @@
     case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
       // Add ADRP with its PC-relative String patch.
       const DexFile& dex_file = load->GetDexFile();
-      uint32_t string_index = load->GetStringIndex();
+      uint32_t string_index = load->GetStringIndex().index_;
       DCHECK(codegen_->GetCompilerOptions().IsBootImage());
       vixl::aarch64::Label* adrp_label = codegen_->NewPcRelativeStringPatch(dex_file, string_index);
       codegen_->EmitAdrpPlaceholder(adrp_label, out.X());
@@ -4612,7 +4612,7 @@
     case HLoadString::LoadKind::kBssEntry: {
       // Add ADRP with its PC-relative String .bss entry patch.
       const DexFile& dex_file = load->GetDexFile();
-      uint32_t string_index = load->GetStringIndex();
+      uint32_t string_index = load->GetStringIndex().index_;
       DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
       UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
       Register temp = temps.AcquireX();
@@ -4653,7 +4653,7 @@
   // TODO: Re-add the compiler code to do string dex cache lookup again.
   InvokeRuntimeCallingConvention calling_convention;
   DCHECK_EQ(calling_convention.GetRegisterAt(0).GetCode(), out.GetCode());
-  __ Mov(calling_convention.GetRegisterAt(0).W(), load->GetStringIndex());
+  __ Mov(calling_convention.GetRegisterAt(0).W(), load->GetStringIndex().index_);
   codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
   CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
 }
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 1545fd3..868c8b0 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -560,14 +560,15 @@
       uint32_t element_offset,
       vixl::aarch64::Label* adrp_label = nullptr);
 
-  vixl::aarch64::Literal<uint32_t>* DeduplicateBootImageStringLiteral(const DexFile& dex_file,
-                                                                      uint32_t string_index);
+  vixl::aarch64::Literal<uint32_t>* DeduplicateBootImageStringLiteral(
+      const DexFile& dex_file,
+      dex::StringIndex string_index);
   vixl::aarch64::Literal<uint32_t>* DeduplicateBootImageTypeLiteral(const DexFile& dex_file,
                                                                     dex::TypeIndex type_index);
   vixl::aarch64::Literal<uint32_t>* DeduplicateBootImageAddressLiteral(uint64_t address);
   vixl::aarch64::Literal<uint64_t>* DeduplicateDexCacheAddressLiteral(uint64_t address);
   vixl::aarch64::Literal<uint32_t>* DeduplicateJitStringLiteral(const DexFile& dex_file,
-                                                                uint32_t string_index);
+                                                                dex::StringIndex string_index);
 
   void EmitAdrpPlaceholder(vixl::aarch64::Label* fixup_label, vixl::aarch64::Register reg);
   void EmitAddPlaceholder(vixl::aarch64::Label* fixup_label,
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index e399f32..aa8a77e 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2215,10 +2215,9 @@
       break;
     }
 
-    // TODO(VIXL): https://android-review.googlesource.com/#/c/254144/
     case Primitive::kPrimLong: {
       locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetInAt(1, Location::RequiresRegister());
+      locations->SetInAt(1, ArmEncodableConstantOrRegister(add->InputAt(1), ADD));
       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
       break;
     }
@@ -2248,11 +2247,15 @@
       }
       break;
 
-    // TODO(VIXL): https://android-review.googlesource.com/#/c/254144/
     case Primitive::kPrimLong: {
-      DCHECK(second.IsRegisterPair());
-      __ Adds(LowRegisterFrom(out), LowRegisterFrom(first), LowRegisterFrom(second));
-      __ Adc(HighRegisterFrom(out), HighRegisterFrom(first), HighRegisterFrom(second));
+      if (second.IsConstant()) {
+        uint64_t value = static_cast<uint64_t>(Int64FromConstant(second.GetConstant()));
+        GenerateAddLongConst(out, first, value);
+      } else {
+        DCHECK(second.IsRegisterPair());
+        __ Adds(LowRegisterFrom(out), LowRegisterFrom(first), LowRegisterFrom(second));
+        __ Adc(HighRegisterFrom(out), HighRegisterFrom(first), HighRegisterFrom(second));
+      }
       break;
     }
 
@@ -2277,10 +2280,9 @@
       break;
     }
 
-    // TODO(VIXL): https://android-review.googlesource.com/#/c/254144/
     case Primitive::kPrimLong: {
       locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetInAt(1, Location::RequiresRegister());
+      locations->SetInAt(1, ArmEncodableConstantOrRegister(sub->InputAt(1), SUB));
       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
       break;
     }
@@ -2307,11 +2309,15 @@
       break;
     }
 
-    // TODO(VIXL): https://android-review.googlesource.com/#/c/254144/
     case Primitive::kPrimLong: {
-      DCHECK(second.IsRegisterPair());
-      __ Subs(LowRegisterFrom(out), LowRegisterFrom(first), LowRegisterFrom(second));
-      __ Sbc(HighRegisterFrom(out), HighRegisterFrom(first), HighRegisterFrom(second));
+      if (second.IsConstant()) {
+        uint64_t value = static_cast<uint64_t>(Int64FromConstant(second.GetConstant()));
+        GenerateAddLongConst(out, first, -value);
+      } else {
+        DCHECK(second.IsRegisterPair());
+        __ Subs(LowRegisterFrom(out), LowRegisterFrom(first), LowRegisterFrom(second));
+        __ Sbc(HighRegisterFrom(out), HighRegisterFrom(first), HighRegisterFrom(second));
+      }
       break;
     }
 
@@ -2478,7 +2484,8 @@
   int shift;
   CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
 
-  __ Mov(temp1, Operand::From(magic));
+  // TODO(VIXL): Change the static cast to Operand::From() after VIXL is fixed.
+  __ Mov(temp1, static_cast<int32_t>(magic));
   __ Smull(temp2, temp1, dividend, temp1);
 
   if (imm > 0 && magic < 0) {
@@ -4215,6 +4222,7 @@
         } else {
           codegen_->LoadFromShiftedRegOffset(type, out_loc, temp, RegisterFrom(index));
         }
+        temps.Release(temp);
       }
       break;
     }
@@ -4254,6 +4262,7 @@
             __ Add(temp, obj, data_offset);
           }
           codegen_->LoadFromShiftedRegOffset(type, out_loc, temp, RegisterFrom(index));
+          temps.Release(temp);
 
           codegen_->MaybeRecordImplicitNullCheck(instruction);
           // If read barriers are enabled, emit read barriers other than
@@ -4275,6 +4284,7 @@
         vixl32::Register temp = temps.Acquire();
         __ Add(temp, obj, Operand(RegisterFrom(index), vixl32::LSL, TIMES_8));
         GetAssembler()->LoadFromOffset(kLoadWordPair, LowRegisterFrom(out_loc), temp, data_offset);
+        temps.Release(temp);
       }
       break;
     }
@@ -4288,6 +4298,7 @@
         vixl32::Register temp = temps.Acquire();
         __ Add(temp, obj, Operand(RegisterFrom(index), vixl32::LSL, TIMES_4));
         GetAssembler()->LoadSFromOffset(out, temp, data_offset);
+        temps.Release(temp);
       }
       break;
     }
@@ -4300,6 +4311,7 @@
         vixl32::Register temp = temps.Acquire();
         __ Add(temp, obj, Operand(RegisterFrom(index), vixl32::LSL, TIMES_8));
         GetAssembler()->LoadDFromOffset(DRegisterFrom(out_loc), temp, data_offset);
+        temps.Release(temp);
       }
       break;
     }
@@ -4389,6 +4401,7 @@
           __ Add(temp, array, data_offset);
         }
         codegen_->StoreToShiftedRegOffset(value_type, value_loc, temp, RegisterFrom(index));
+        temps.Release(temp);
       }
       break;
     }
@@ -4410,6 +4423,7 @@
           vixl32::Register temp = temps.Acquire();
           __ Add(temp, array, data_offset);
           codegen_->StoreToShiftedRegOffset(value_type, value_loc, temp, RegisterFrom(index));
+          temps.Release(temp);
         }
         codegen_->MaybeRecordImplicitNullCheck(instruction);
         DCHECK(!needs_write_barrier);
@@ -4443,6 +4457,7 @@
             vixl32::Register temp = temps.Acquire();
             __ Add(temp, array, data_offset);
             codegen_->StoreToShiftedRegOffset(value_type, value_loc, temp, RegisterFrom(index));
+            temps.Release(temp);
           }
           codegen_->MaybeRecordImplicitNullCheck(instruction);
           __ B(&done);
@@ -4512,6 +4527,7 @@
                                           LocationFrom(source),
                                           temp,
                                           RegisterFrom(index));
+        temps.Release(temp);
       }
 
       if (!may_need_runtime_call_for_type_check) {
@@ -4541,6 +4557,7 @@
         vixl32::Register temp = temps.Acquire();
         __ Add(temp, array, Operand(RegisterFrom(index), vixl32::LSL, TIMES_8));
         GetAssembler()->StoreToOffset(kStoreWordPair, LowRegisterFrom(value), temp, data_offset);
+        temps.Release(temp);
       }
       break;
     }
@@ -4555,6 +4572,7 @@
         vixl32::Register temp = temps.Acquire();
         __ Add(temp, array, Operand(RegisterFrom(index), vixl32::LSL, TIMES_4));
         GetAssembler()->StoreSToOffset(SRegisterFrom(value), temp, data_offset);
+        temps.Release(temp);
       }
       break;
     }
@@ -4569,6 +4587,7 @@
         vixl32::Register temp = temps.Acquire();
         __ Add(temp, array, Operand(RegisterFrom(index), vixl32::LSL, TIMES_8));
         GetAssembler()->StoreDToOffset(DRegisterFrom(value), temp, data_offset);
+        temps.Release(temp);
       }
       break;
     }
@@ -4904,9 +4923,12 @@
   } else if (source.IsStackSlot() && destination.IsRegister()) {
     Exchange(RegisterFrom(destination), source.GetStackIndex());
   } else if (source.IsStackSlot() && destination.IsStackSlot()) {
-    TODO_VIXL32(FATAL);
+    Exchange(source.GetStackIndex(), destination.GetStackIndex());
   } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
-    TODO_VIXL32(FATAL);
+    vixl32::SRegister temp = temps.AcquireS();
+    __ Vmov(temp, SRegisterFrom(source));
+    __ Vmov(SRegisterFrom(source), SRegisterFrom(destination));
+    __ Vmov(SRegisterFrom(destination), temp);
   } else if (source.IsRegisterPair() && destination.IsRegisterPair()) {
     vixl32::DRegister temp = temps.AcquireD();
     __ Vmov(temp, LowRegisterFrom(source), HighRegisterFrom(source));
@@ -4929,9 +4951,27 @@
     __ Vmov(first, second);
     __ Vmov(second, temp);
   } else if (source.IsFpuRegisterPair() || destination.IsFpuRegisterPair()) {
-    TODO_VIXL32(FATAL);
+    vixl32::DRegister reg = source.IsFpuRegisterPair()
+        ? DRegisterFrom(source)
+        : DRegisterFrom(destination);
+    int mem = source.IsFpuRegisterPair()
+        ? destination.GetStackIndex()
+        : source.GetStackIndex();
+    vixl32::DRegister temp = temps.AcquireD();
+    __ Vmov(temp, reg);
+    GetAssembler()->LoadDFromOffset(reg, sp, mem);
+    GetAssembler()->StoreDToOffset(temp, sp, mem);
   } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
-    TODO_VIXL32(FATAL);
+    vixl32::SRegister reg = source.IsFpuRegister()
+        ? SRegisterFrom(source)
+        : SRegisterFrom(destination);
+    int mem = source.IsFpuRegister()
+        ? destination.GetStackIndex()
+        : source.GetStackIndex();
+    vixl32::Register temp = temps.Acquire();
+    __ Vmov(temp, reg);
+    GetAssembler()->LoadSFromOffset(reg, sp, mem);
+    GetAssembler()->StoreToOffset(kStoreWord, temp, sp, mem);
   } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
     vixl32::DRegister temp1 = temps.AcquireD();
     vixl32::DRegister temp2 = temps.AcquireD();
@@ -5116,7 +5156,7 @@
   // TODO: Re-add the compiler code to do string dex cache lookup again.
   DCHECK_EQ(load->GetLoadKind(), HLoadString::LoadKind::kDexCacheViaMethod);
   InvokeRuntimeCallingConventionARMVIXL calling_convention;
-  __ Mov(calling_convention.GetRegisterAt(0), load->GetStringIndex());
+  __ Mov(calling_convention.GetRegisterAt(0), load->GetStringIndex().index_);
   codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
   CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
 }
@@ -5715,6 +5755,33 @@
   __ Eor(out, first, value);
 }
 
+void InstructionCodeGeneratorARMVIXL::GenerateAddLongConst(Location out,
+                                                           Location first,
+                                                           uint64_t value) {
+  vixl32::Register out_low = LowRegisterFrom(out);
+  vixl32::Register out_high = HighRegisterFrom(out);
+  vixl32::Register first_low = LowRegisterFrom(first);
+  vixl32::Register first_high = HighRegisterFrom(first);
+  uint32_t value_low = Low32Bits(value);
+  uint32_t value_high = High32Bits(value);
+  if (value_low == 0u) {
+    if (!out_low.Is(first_low)) {
+      __ Mov(out_low, first_low);
+    }
+    __ Add(out_high, first_high, value_high);
+    return;
+  }
+  __ Adds(out_low, first_low, value_low);
+  if (GetAssembler()->ShifterOperandCanHold(ADC, value_high, kCcKeep)) {
+    __ Adc(out_high, first_high, value_high);
+  } else if (GetAssembler()->ShifterOperandCanHold(SBC, ~value_high, kCcKeep)) {
+    __ Sbc(out_high, first_high, ~value_high);
+  } else {
+    LOG(FATAL) << "Unexpected constant " << value_high;
+    UNREACHABLE();
+  }
+}
+
 void InstructionCodeGeneratorARMVIXL::HandleBitwiseOperation(HBinaryOperation* instruction) {
   LocationSummary* locations = instruction->GetLocations();
   Location first = locations->InAt(0);
@@ -6175,17 +6242,35 @@
   }
 }
 
-void LocationsBuilderARMVIXL::VisitClassTableGet(
-    HClassTableGet* instruction ATTRIBUTE_UNUSED) {
-  TODO_VIXL32(FATAL);
+void LocationsBuilderARMVIXL::VisitClassTableGet(HClassTableGet* instruction) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister());
 }
 
-void InstructionCodeGeneratorARMVIXL::VisitClassTableGet(
-    HClassTableGet* instruction ATTRIBUTE_UNUSED) {
-  TODO_VIXL32(FATAL);
+void InstructionCodeGeneratorARMVIXL::VisitClassTableGet(HClassTableGet* instruction) {
+  if (instruction->GetTableKind() == HClassTableGet::TableKind::kVTable) {
+    uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
+        instruction->GetIndex(), kArmPointerSize).SizeValue();
+    GetAssembler()->LoadFromOffset(kLoadWord,
+                                   OutputRegister(instruction),
+                                   InputRegisterAt(instruction, 0),
+                                   method_offset);
+  } else {
+    uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+        instruction->GetIndex(), kArmPointerSize));
+    GetAssembler()->LoadFromOffset(kLoadWord,
+                                   OutputRegister(instruction),
+                                   InputRegisterAt(instruction, 0),
+                                   mirror::Class::ImtPtrOffset(kArmPointerSize).Uint32Value());
+    GetAssembler()->LoadFromOffset(kLoadWord,
+                                   OutputRegister(instruction),
+                                   OutputRegister(instruction),
+                                   method_offset);
+  }
 }
 
-
 #undef __
 #undef QUICK_ENTRY_POINT
 #undef TODO_VIXL32
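
The ADC/SBC fallback in GenerateAddLongConst above relies on an ARM identity: SBC computes rn - op - (1 - carry), which in two's complement equals rn + ~op + carry, so adding value_high with the carry-in is equivalent to SBC with ~value_high. That is what lets the code use whichever of the two immediates the shifter operand can encode. A quick standalone check of the identity (carry modeled explicitly; unsigned wraparound intended):

#include <cassert>
#include <cstdint>

// ADC: rd = rn + op + carry.
static uint32_t Adc(uint32_t rn, uint32_t op, uint32_t carry) {
  return rn + op + carry;
}

// SBC: rd = rn - op - (1 - carry), i.e. rn + ~op + carry modulo 2^32.
static uint32_t Sbc(uint32_t rn, uint32_t op, uint32_t carry) {
  return rn - op - (1u - carry);
}

int main() {
  for (uint32_t carry = 0; carry <= 1; ++carry) {
    for (uint32_t v : {0u, 1u, 0x7fffffffu, 0x80000000u, 0xffffffffu}) {
      // Adding v equals subtracting ~v, for either carry-in.
      assert(Adc(0x12345678u, v, carry) == Sbc(0x12345678u, ~v, carry));
    }
  }
  return 0;
}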
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 38c756f..89fef43 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -386,6 +386,7 @@
   void GenerateAndConst(vixl::aarch32::Register out, vixl::aarch32::Register first, uint32_t value);
   void GenerateOrrConst(vixl::aarch32::Register out, vixl::aarch32::Register first, uint32_t value);
   void GenerateEorConst(vixl::aarch32::Register out, vixl::aarch32::Register first, uint32_t value);
+  void GenerateAddLongConst(Location out, Location first, uint64_t value);
   void HandleBitwiseOperation(HBinaryOperation* operation);
   void HandleCondition(HCondition* condition);
   void HandleIntegerRotate(HRor* ror);
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 8f94834..572d900 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -280,7 +280,7 @@
 
     InvokeRuntimeCallingConvention calling_convention;
     HLoadString* load = instruction_->AsLoadString();
-    const uint32_t string_index = load->GetStringIndex();
+    const uint32_t string_index = load->GetStringIndex().index_;
     __ LoadConst32(calling_convention.GetRegisterAt(0), string_index);
     mips_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
     CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
@@ -1047,7 +1047,7 @@
     uint32_t literal_offset = __ GetLabelLocation(literal->GetLabel());
     linker_patches->push_back(LinkerPatch::StringPatch(literal_offset,
                                                        target_string.dex_file,
-                                                       target_string.string_index));
+                                                       target_string.string_index.index_));
   }
   for (const auto& entry : boot_image_type_patches_) {
     const TypeReference& target_type = entry.first;
@@ -1110,7 +1110,7 @@
 }
 
 Literal* CodeGeneratorMIPS::DeduplicateBootImageStringLiteral(const DexFile& dex_file,
-                                                              uint32_t string_index) {
+                                                              dex::StringIndex string_index) {
   return boot_image_string_patches_.GetOrCreate(
       StringReference(&dex_file, string_index),
       [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
@@ -5743,7 +5743,7 @@
       DCHECK(!kEmitCompilerReadBarrier);
       DCHECK(codegen_->GetCompilerOptions().IsBootImage());
       CodeGeneratorMIPS::PcRelativePatchInfo* info =
-          codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
+          codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
       codegen_->EmitPcRelativeAddressPlaceholder(info, out, base_or_current_method_reg);
       return;  // No dex cache slow path.
     }
@@ -5759,7 +5759,7 @@
     case HLoadString::LoadKind::kBssEntry: {
       DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
       CodeGeneratorMIPS::PcRelativePatchInfo* info =
-          codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
+          codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
       codegen_->EmitPcRelativeAddressPlaceholder(info, out, base_or_current_method_reg);
       __ LoadFromOffset(kLoadWord, out, out, 0);
       SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS(load);
@@ -5775,7 +5775,7 @@
   // TODO: Re-add the compiler code to do string dex cache lookup again.
   DCHECK(load_kind == HLoadString::LoadKind::kDexCacheViaMethod);
   InvokeRuntimeCallingConvention calling_convention;
-  __ LoadConst32(calling_convention.GetRegisterAt(0), load->GetStringIndex());
+  __ LoadConst32(calling_convention.GetRegisterAt(0), load->GetStringIndex().index_);
   codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
   CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
 }
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index e225d20..2273e52 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -18,6 +18,7 @@
 #define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS_H_
 
 #include "code_generator.h"
+#include "dex_file_types.h"
 #include "driver/compiler_options.h"
 #include "nodes.h"
 #include "parallel_move_resolver.h"
@@ -452,7 +453,8 @@
   PcRelativePatchInfo* NewPcRelativeTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
   PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
                                                        uint32_t element_offset);
-  Literal* DeduplicateBootImageStringLiteral(const DexFile& dex_file, uint32_t string_index);
+  Literal* DeduplicateBootImageStringLiteral(const DexFile& dex_file,
+                                             dex::StringIndex string_index);
   Literal* DeduplicateBootImageTypeLiteral(const DexFile& dex_file, dex::TypeIndex type_index);
   Literal* DeduplicateBootImageAddressLiteral(uint32_t address);
 
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 02b01c8..b5e9871 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -234,7 +234,7 @@
     SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
-    const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
+    const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex().index_;
     __ LoadConst32(calling_convention.GetRegisterAt(0), string_index);
     mips64_codegen->InvokeRuntime(kQuickResolveString,
                                   instruction_,
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 51e902a..12aa03c 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -225,7 +225,7 @@
     SaveLiveRegisters(codegen, locations);
 
     InvokeRuntimeCallingConvention calling_convention;
-    const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
+    const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex().index_;
     __ movl(calling_convention.GetRegisterAt(0), Immediate(string_index));
     x86_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
     CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
@@ -4607,7 +4607,7 @@
 
 void CodeGeneratorX86::RecordBootStringPatch(HLoadString* load_string) {
   DCHECK(GetCompilerOptions().IsBootImage());
-  string_patches_.emplace_back(load_string->GetDexFile(), load_string->GetStringIndex());
+  string_patches_.emplace_back(load_string->GetDexFile(), load_string->GetStringIndex().index_);
   __ Bind(&string_patches_.back().label);
 }
 
@@ -4618,7 +4618,7 @@
 
 Label* CodeGeneratorX86::NewStringBssEntryPatch(HLoadString* load_string) {
   DCHECK(!GetCompilerOptions().IsBootImage());
-  string_patches_.emplace_back(load_string->GetDexFile(), load_string->GetStringIndex());
+  string_patches_.emplace_back(load_string->GetDexFile(), load_string->GetStringIndex().index_);
   return &string_patches_.back().label;
 }
 
@@ -6253,10 +6253,11 @@
   }
 }
 
-Label* CodeGeneratorX86::NewJitRootStringPatch(const DexFile& dex_file, uint32_t dex_index) {
+Label* CodeGeneratorX86::NewJitRootStringPatch(const DexFile& dex_file,
+                                               dex::StringIndex dex_index) {
   jit_string_roots_.Overwrite(StringReference(&dex_file, dex_index), /* placeholder */ 0u);
   // Add a patch entry and return the label.
-  jit_string_patches_.emplace_back(dex_file, dex_index);
+  jit_string_patches_.emplace_back(dex_file, dex_index.index_);
   PatchInfo<Label>* info = &jit_string_patches_.back();
   return &info->label;
 }
@@ -6313,7 +6314,7 @@
   // TODO: Re-add the compiler code to do string dex cache lookup again.
   InvokeRuntimeCallingConvention calling_convention;
   DCHECK_EQ(calling_convention.GetRegisterAt(0), out);
-  __ movl(calling_convention.GetRegisterAt(0), Immediate(load->GetStringIndex()));
+  __ movl(calling_convention.GetRegisterAt(0), Immediate(load->GetStringIndex().index_));
   codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
   CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
 }
@@ -7755,7 +7756,8 @@
 
 void CodeGeneratorX86::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
   for (const PatchInfo<Label>& info : jit_string_patches_) {
-    const auto& it = jit_string_roots_.find(StringReference(&info.dex_file, info.index));
+    const auto& it = jit_string_roots_.find(StringReference(&info.dex_file,
+                                                            dex::StringIndex(info.index)));
     DCHECK(it != jit_string_roots_.end());
     size_t index_in_table = it->second;
     uint32_t code_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 16ea6b5..2ae3670 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -20,6 +20,7 @@
 #include "arch/x86/instruction_set_features_x86.h"
 #include "base/enums.h"
 #include "code_generator.h"
+#include "dex_file_types.h"
 #include "driver/compiler_options.h"
 #include "nodes.h"
 #include "parallel_move_resolver.h"
@@ -414,7 +415,7 @@
   void RecordTypePatch(HLoadClass* load_class);
   Label* NewStringBssEntryPatch(HLoadString* load_string);
   Label* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file, uint32_t element_offset);
-  Label* NewJitRootStringPatch(const DexFile& dex_file, uint32_t dex_index);
+  Label* NewJitRootStringPatch(const DexFile& dex_file, dex::StringIndex dex_index);
 
   void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;
 
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 3467313..22f7f6b 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -300,7 +300,7 @@
     __ Bind(GetEntryLabel());
     SaveLiveRegisters(codegen, locations);
 
-    const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
+    const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex().index_;
     // Custom calling convention: RAX serves as both input and output.
     __ movl(CpuRegister(RAX), Immediate(string_index));
     x86_64_codegen->InvokeRuntime(kQuickResolveString,
@@ -1106,7 +1106,7 @@
 
 void CodeGeneratorX86_64::RecordBootStringPatch(HLoadString* load_string) {
   DCHECK(GetCompilerOptions().IsBootImage());
-  string_patches_.emplace_back(load_string->GetDexFile(), load_string->GetStringIndex());
+  string_patches_.emplace_back(load_string->GetDexFile(), load_string->GetStringIndex().index_);
   __ Bind(&string_patches_.back().label);
 }
 
@@ -1117,7 +1117,7 @@
 
 Label* CodeGeneratorX86_64::NewStringBssEntryPatch(HLoadString* load_string) {
   DCHECK(!GetCompilerOptions().IsBootImage());
-  string_patches_.emplace_back(load_string->GetDexFile(), load_string->GetStringIndex());
+  string_patches_.emplace_back(load_string->GetDexFile(), load_string->GetStringIndex().index_);
   return &string_patches_.back().label;
 }
 
@@ -5660,10 +5660,11 @@
   }
 }
 
-Label* CodeGeneratorX86_64::NewJitRootStringPatch(const DexFile& dex_file, uint32_t dex_index) {
+Label* CodeGeneratorX86_64::NewJitRootStringPatch(const DexFile& dex_file,
+                                                  dex::StringIndex dex_index) {
   jit_string_roots_.Overwrite(StringReference(&dex_file, dex_index), /* placeholder */ 0u);
   // Add a patch entry and return the label.
-  jit_string_patches_.emplace_back(dex_file, dex_index);
+  jit_string_patches_.emplace_back(dex_file, dex_index.index_);
   PatchInfo<Label>* info = &jit_string_patches_.back();
   return &info->label;
 }
@@ -5714,7 +5715,7 @@
 
   // TODO: Re-add the compiler code to do string dex cache lookup again.
   // Custom calling convention: RAX serves as both input and output.
-  __ movl(CpuRegister(RAX), Immediate(load->GetStringIndex()));
+  __ movl(CpuRegister(RAX), Immediate(load->GetStringIndex().index_));
   codegen_->InvokeRuntime(kQuickResolveString,
                           load,
                           load->GetDexPc());
@@ -7111,7 +7112,8 @@
 
 void CodeGeneratorX86_64::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
   for (const PatchInfo<Label>& info : jit_string_patches_) {
-    const auto& it = jit_string_roots_.find(StringReference(&info.dex_file, info.index));
+    const auto& it = jit_string_roots_.find(StringReference(&info.dex_file,
+                                                            dex::StringIndex(info.index)));
     DCHECK(it != jit_string_roots_.end());
     size_t index_in_table = it->second;
     uint32_t code_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 0f70b15..2f41f73 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -412,7 +412,7 @@
   void RecordTypePatch(HLoadClass* load_class);
   Label* NewStringBssEntryPatch(HLoadString* load_string);
   Label* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file, uint32_t element_offset);
-  Label* NewJitRootStringPatch(const DexFile& dex_file, uint32_t dex_index);
+  Label* NewJitRootStringPatch(const DexFile& dex_file, dex::StringIndex dex_index);
 
   void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;
 
diff --git a/compiler/optimizing/escape.cc b/compiler/optimizing/escape.cc
new file mode 100644
index 0000000..c80e19e
--- /dev/null
+++ b/compiler/optimizing/escape.cc
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "escape.h"
+
+#include "nodes.h"
+
+namespace art {
+
+void CalculateEscape(HInstruction* reference,
+                     bool (*no_escape)(HInstruction*, HInstruction*),
+                     /*out*/ bool* is_singleton,
+                     /*out*/ bool* is_singleton_and_non_escaping) {
+  // For references not allocated in the method, don't assume anything.
+  if (!reference->IsNewInstance() && !reference->IsNewArray()) {
+    *is_singleton = false;
+    *is_singleton_and_non_escaping = false;
+    return;
+  }
+  // Assume the best until proven otherwise.
+  *is_singleton = true;
+  *is_singleton_and_non_escaping = true;
+  // Visit all uses to determine if this reference can escape into the heap,
+  // a method call, an alias, etc.
+  for (const HUseListNode<HInstruction*>& use : reference->GetUses()) {
+    HInstruction* user = use.GetUser();
+    if (no_escape != nullptr && (*no_escape)(reference, user)) {
+      // Client supplied analysis says there is no escape.
+      continue;
+    } else if (user->IsBoundType() || user->IsNullCheck()) {
+      // BoundType shouldn't normally be necessary for an allocation. Just be conservative
+      // for the uncommon cases. Similarly, null checks are eventually eliminated for explicit
+      // allocations, but if we see one before it is simplified, assume an alias.
+      *is_singleton = false;
+      *is_singleton_and_non_escaping = false;
+      return;
+    } else if (user->IsPhi() || user->IsSelect() || user->IsInvoke() ||
+               (user->IsInstanceFieldSet() && (reference == user->InputAt(1))) ||
+               (user->IsUnresolvedInstanceFieldSet() && (reference == user->InputAt(1))) ||
+               (user->IsStaticFieldSet() && (reference == user->InputAt(1))) ||
+               (user->IsUnresolvedStaticFieldSet() && (reference == user->InputAt(0))) ||
+               (user->IsArraySet() && (reference == user->InputAt(2)))) {
+      // The reference is merged to HPhi/HSelect, passed to a callee, or stored to heap.
+      // Hence, the reference is no longer the only name that can refer to its value.
+      *is_singleton = false;
+      *is_singleton_and_non_escaping = false;
+      return;
+    } else if ((user->IsUnresolvedInstanceFieldGet() && (reference == user->InputAt(0))) ||
+               (user->IsUnresolvedInstanceFieldSet() && (reference == user->InputAt(0)))) {
+      // The field is accessed in an unresolved way. We mark the object as a non-singleton.
+      // Note that we could optimize this case and still perform some optimizations until
+      // we hit the unresolved access, but the conservative assumption is the simplest.
+      *is_singleton = false;
+      *is_singleton_and_non_escaping = false;
+      return;
+    } else if (user->IsReturn()) {
+      *is_singleton_and_non_escaping = false;
+    }
+  }
+
+  // Is further analysis needed?
+  if (!*is_singleton_and_non_escaping) {
+    return;
+  }
+
+  // Look at the environment uses and if it's for HDeoptimize, it's treated the
+  // same as a return which escapes at the end of executing the compiled code.
+  // Other environment uses are fine, as long as all client optimizations that
+  // rely on this information are disabled for debuggable.
+  for (const HUseListNode<HEnvironment*>& use : reference->GetEnvUses()) {
+    HEnvironment* user = use.GetUser();
+    if (user->GetHolder()->IsDeoptimize()) {
+      *is_singleton_and_non_escaping = false;
+      break;
+    }
+  }
+}
+
+bool IsNonEscapingSingleton(HInstruction* reference,
+                            bool (*no_escape)(HInstruction*, HInstruction*)) {
+  bool is_singleton = true;
+  bool is_singleton_and_non_escaping = true;
+  CalculateEscape(reference, no_escape, &is_singleton, &is_singleton_and_non_escaping);
+  return is_singleton_and_non_escaping;
+}
+
+}  // namespace art
diff --git a/compiler/optimizing/escape.h b/compiler/optimizing/escape.h
new file mode 100644
index 0000000..6514843
--- /dev/null
+++ b/compiler/optimizing/escape.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_ESCAPE_H_
+#define ART_COMPILER_OPTIMIZING_ESCAPE_H_
+
+namespace art {
+
+class HInstruction;
+
+/*
+ * Methods related to escape analysis, i.e. determining whether an object
+ * allocation is visible outside ('escapes') its immediate method context.
+ */
+
+/*
+ * Performs escape analysis on the given instruction, typically a reference to an
+ * allocation. The method assigns true to parameter 'is_singleton' if the reference
+ * is the only name that can refer to its value during the lifetime of the method,
+ * meaning that the reference is not aliased with something else, is not stored to
+ * heap memory, and is not passed to another method. The method assigns true to parameter
+ * 'is_singleton_and_non_escaping' if the reference is a singleton and is not returned
+ * to the caller or used as an environment local of an HDeoptimize instruction.
+ *
+ * When set, the no_escape function is applied to any use of the allocation instruction
+ * prior to any built-in escape analysis. This allows clients to supply sharper
+ * escape analysis for specific cases. If 'no_escape(reference, user)'
+ * returns true, the user is assumed *not* to cause any escape right away. The return
+ * value false means the client cannot provide a definite answer and built-in escape
+ * analysis is applied to the user instead.
+ */
+void CalculateEscape(HInstruction* reference,
+                     bool (*no_escape)(HInstruction*, HInstruction*),
+                     /*out*/ bool* is_singleton,
+                     /*out*/ bool* is_singleton_and_non_escaping);
+
+/*
+ * Convenience method for testing singleton and non-escaping property at once.
+ * Callers should be aware that this method invokes the full analysis at each call.
+ */
+bool IsNonEscapingSingleton(HInstruction* reference,
+                            bool (*no_escape)(HInstruction*, HInstruction*));
+
+}  // namespace art
+
+#endif  // ART_COMPILER_OPTIMIZING_ESCAPE_H_
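
A usage sketch for the new API: a hypothetical client pass (in the style of load-store elimination) asking whether stores into a fresh allocation stay local. Only CalculateEscape and its out-parameters come from the header above; the surrounding names are illustrative:

// 'new_instance' is some HInstruction* produced by an HNewInstance.
bool is_singleton = false;
bool is_singleton_and_non_escaping = false;
// Pass nullptr to skip client-specific refinement and rely solely on the
// built-in analysis of uses and environment uses.
CalculateEscape(new_instance,
                /* no_escape */ nullptr,
                &is_singleton,
                &is_singleton_and_non_escaping);
if (is_singleton_and_non_escaping) {
  // The object never leaves this method: accesses to its fields can be
  // reasoned about purely locally.
}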
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 16a465a..01e89bb 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -258,6 +258,40 @@
   ProfilingInfo* const profiling_info_;
 };
 
+static bool IsMonomorphic(Handle<mirror::ObjectArray<mirror::Class>> classes)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  DCHECK_GE(InlineCache::kIndividualCacheSize, 2);
+  return classes->Get(0) != nullptr && classes->Get(1) == nullptr;
+}
+
+static bool IsMegamorphic(Handle<mirror::ObjectArray<mirror::Class>> classes)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  for (size_t i = 0; i < InlineCache::kIndividualCacheSize; ++i) {
+    if (classes->Get(i) == nullptr) {
+      return false;
+    }
+  }
+  return true;
+}
+
+static mirror::Class* GetMonomorphicType(Handle<mirror::ObjectArray<mirror::Class>> classes)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  DCHECK(classes->Get(0) != nullptr);
+  return classes->Get(0);
+}
+
+static bool IsUninitialized(Handle<mirror::ObjectArray<mirror::Class>> classes)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  return classes->Get(0) == nullptr;
+}
+
+static bool IsPolymorphic(Handle<mirror::ObjectArray<mirror::Class>> classes)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  DCHECK_GE(InlineCache::kIndividualCacheSize, 3);
+  return classes->Get(1) != nullptr &&
+      classes->Get(InlineCache::kIndividualCacheSize - 1) == nullptr;
+}
+
 bool HInliner::TryInline(HInvoke* invoke_instruction) {
   if (invoke_instruction->IsInvokeUnresolved()) {
     return false;  // Don't bother to move further if we know the method is unresolved.
@@ -301,31 +335,48 @@
     ScopedProfilingInfoInlineUse spiis(caller, soa.Self());
     ProfilingInfo* profiling_info = spiis.GetProfilingInfo();
     if (profiling_info != nullptr) {
-      const InlineCache& ic = *profiling_info->GetInlineCache(invoke_instruction->GetDexPc());
-      if (ic.IsUninitialized()) {
-        VLOG(compiler) << "Interface or virtual call to "
-                       << caller_dex_file.PrettyMethod(method_index)
-                       << " is not hit and not inlined";
+      StackHandleScope<1> hs(soa.Self());
+      ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
+      Handle<mirror::ObjectArray<mirror::Class>> inline_cache = hs.NewHandle(
+          mirror::ObjectArray<mirror::Class>::Alloc(
+              soa.Self(),
+              class_linker->GetClassRoot(ClassLinker::kClassArrayClass),
+              InlineCache::kIndividualCacheSize));
+      if (inline_cache.Get() == nullptr) {
+        // We got an OOME. Just clear the exception, and don't inline.
+        DCHECK(soa.Self()->IsExceptionPending());
+        soa.Self()->ClearException();
+        VLOG(compiler) << "Out of memory in the compiler when trying to inline";
         return false;
-      } else if (ic.IsMonomorphic()) {
-        MaybeRecordStat(kMonomorphicCall);
-        if (outermost_graph_->IsCompilingOsr()) {
-          // If we are compiling OSR, we pretend this call is polymorphic, as we may come from the
-          // interpreter and it may have seen different receiver types.
-          return TryInlinePolymorphicCall(invoke_instruction, resolved_method, ic);
-        } else {
-          return TryInlineMonomorphicCall(invoke_instruction, resolved_method, ic);
-        }
-      } else if (ic.IsPolymorphic()) {
-        MaybeRecordStat(kPolymorphicCall);
-        return TryInlinePolymorphicCall(invoke_instruction, resolved_method, ic);
       } else {
-        DCHECK(ic.IsMegamorphic());
-        VLOG(compiler) << "Interface or virtual call to "
-                       << caller_dex_file.PrettyMethod(method_index)
-                       << " is megamorphic and not inlined";
-        MaybeRecordStat(kMegamorphicCall);
-        return false;
+        Runtime::Current()->GetJit()->GetCodeCache()->CopyInlineCacheInto(
+            *profiling_info->GetInlineCache(invoke_instruction->GetDexPc()),
+            inline_cache);
+        if (IsUninitialized(inline_cache)) {
+          VLOG(compiler) << "Interface or virtual call to "
+                         << caller_dex_file.PrettyMethod(method_index)
+                         << " is not hit and not inlined";
+          return false;
+        } else if (IsMonomorphic(inline_cache)) {
+          MaybeRecordStat(kMonomorphicCall);
+          if (outermost_graph_->IsCompilingOsr()) {
+            // If we are compiling OSR, we pretend this call is polymorphic, as we may come from the
+            // interpreter and it may have seen different receiver types.
+            return TryInlinePolymorphicCall(invoke_instruction, resolved_method, inline_cache);
+          } else {
+            return TryInlineMonomorphicCall(invoke_instruction, resolved_method, inline_cache);
+          }
+        } else if (IsPolymorphic(inline_cache)) {
+          MaybeRecordStat(kPolymorphicCall);
+          return TryInlinePolymorphicCall(invoke_instruction, resolved_method, inline_cache);
+        } else {
+          DCHECK(IsMegamorphic(inline_cache));
+          VLOG(compiler) << "Interface or virtual call to "
+                         << caller_dex_file.PrettyMethod(method_index)
+                         << " is megamorphic and not inlined";
+          MaybeRecordStat(kMegamorphicCall);
+          return false;
+        }
       }
     }
   }
@@ -358,13 +409,13 @@
 
 bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
                                         ArtMethod* resolved_method,
-                                        const InlineCache& ic) {
+                                        Handle<mirror::ObjectArray<mirror::Class>> classes) {
   DCHECK(invoke_instruction->IsInvokeVirtual() || invoke_instruction->IsInvokeInterface())
       << invoke_instruction->DebugName();
 
   const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
   dex::TypeIndex class_index = FindClassIndexIn(
-      ic.GetMonomorphicType(), caller_dex_file, caller_compilation_unit_.GetDexCache());
+      GetMonomorphicType(classes), caller_dex_file, caller_compilation_unit_.GetDexCache());
   if (!class_index.IsValid()) {
     VLOG(compiler) << "Call to " << ArtMethod::PrettyMethod(resolved_method)
                    << " from inline cache is not inlined because its class is not"
@@ -375,11 +426,11 @@
   ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
   PointerSize pointer_size = class_linker->GetImagePointerSize();
   if (invoke_instruction->IsInvokeInterface()) {
-    resolved_method = ic.GetMonomorphicType()->FindVirtualMethodForInterface(
+    resolved_method = GetMonomorphicType(classes)->FindVirtualMethodForInterface(
         resolved_method, pointer_size);
   } else {
     DCHECK(invoke_instruction->IsInvokeVirtual());
-    resolved_method = ic.GetMonomorphicType()->FindVirtualMethodForVirtual(
+    resolved_method = GetMonomorphicType(classes)->FindVirtualMethodForVirtual(
         resolved_method, pointer_size);
   }
   DCHECK(resolved_method != nullptr);
@@ -393,7 +444,7 @@
 
   // We successfully inlined, now add a guard.
   bool is_referrer =
-      (ic.GetMonomorphicType() == outermost_graph_->GetArtMethod()->GetDeclaringClass());
+      (GetMonomorphicType(classes) == outermost_graph_->GetArtMethod()->GetDeclaringClass());
   AddTypeGuard(receiver,
                cursor,
                bb_cursor,
@@ -457,11 +508,11 @@
 
 bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
                                         ArtMethod* resolved_method,
-                                        const InlineCache& ic) {
+                                        Handle<mirror::ObjectArray<mirror::Class>> classes) {
   DCHECK(invoke_instruction->IsInvokeVirtual() || invoke_instruction->IsInvokeInterface())
       << invoke_instruction->DebugName();
 
-  if (TryInlinePolymorphicCallToSameTarget(invoke_instruction, resolved_method, ic)) {
+  if (TryInlinePolymorphicCallToSameTarget(invoke_instruction, resolved_method, classes)) {
     return true;
   }
 
@@ -472,16 +523,16 @@
   bool all_targets_inlined = true;
   bool one_target_inlined = false;
   for (size_t i = 0; i < InlineCache::kIndividualCacheSize; ++i) {
-    if (ic.GetTypeAt(i) == nullptr) {
+    if (classes->Get(i) == nullptr) {
       break;
     }
     ArtMethod* method = nullptr;
     if (invoke_instruction->IsInvokeInterface()) {
-      method = ic.GetTypeAt(i)->FindVirtualMethodForInterface(
+      method = classes->Get(i)->FindVirtualMethodForInterface(
           resolved_method, pointer_size);
     } else {
       DCHECK(invoke_instruction->IsInvokeVirtual());
-      method = ic.GetTypeAt(i)->FindVirtualMethodForVirtual(
+      method = classes->Get(i)->FindVirtualMethodForVirtual(
           resolved_method, pointer_size);
     }
 
@@ -490,20 +541,20 @@
     HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
 
     dex::TypeIndex class_index = FindClassIndexIn(
-        ic.GetTypeAt(i), caller_dex_file, caller_compilation_unit_.GetDexCache());
+        classes->Get(i), caller_dex_file, caller_compilation_unit_.GetDexCache());
     HInstruction* return_replacement = nullptr;
     if (!class_index.IsValid() ||
         !TryBuildAndInline(invoke_instruction, method, &return_replacement)) {
       all_targets_inlined = false;
     } else {
       one_target_inlined = true;
-      bool is_referrer = (ic.GetTypeAt(i) == outermost_graph_->GetArtMethod()->GetDeclaringClass());
+      bool is_referrer = (classes->Get(i) == outermost_graph_->GetArtMethod()->GetDeclaringClass());
 
       // If we have inlined all targets before, and this receiver is the last seen,
       // we deoptimize instead of keeping the original invoke instruction.
       bool deoptimize = all_targets_inlined &&
           (i != InlineCache::kIndividualCacheSize - 1) &&
-          (ic.GetTypeAt(i + 1) == nullptr);
+          (classes->Get(i + 1) == nullptr);
 
       if (outermost_graph_->IsCompilingOsr()) {
         // We do not support HDeoptimize in OSR methods.
@@ -618,9 +669,10 @@
       merge, original_invoke_block, /* replace_if_back_edge */ true);
 }
 
-bool HInliner::TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction,
-                                                    ArtMethod* resolved_method,
-                                                    const InlineCache& ic) {
+bool HInliner::TryInlinePolymorphicCallToSameTarget(
+    HInvoke* invoke_instruction,
+    ArtMethod* resolved_method,
+    Handle<mirror::ObjectArray<mirror::Class>> classes) {
   // This optimization only works under JIT for now.
   DCHECK(Runtime::Current()->UseJitCompilation());
   if (graph_->GetInstructionSet() == kMips64) {
@@ -639,12 +691,12 @@
   // Check whether we are actually calling the same method among
   // the different types seen.
   for (size_t i = 0; i < InlineCache::kIndividualCacheSize; ++i) {
-    if (ic.GetTypeAt(i) == nullptr) {
+    if (classes->Get(i) == nullptr) {
       break;
     }
     ArtMethod* new_method = nullptr;
     if (invoke_instruction->IsInvokeInterface()) {
-      new_method = ic.GetTypeAt(i)->GetImt(pointer_size)->Get(
+      new_method = classes->Get(i)->GetImt(pointer_size)->Get(
           method_index, pointer_size);
       if (new_method->IsRuntimeMethod()) {
         // Bail out as soon as we see a conflict trampoline in one of the target's
@@ -653,7 +705,7 @@
       }
     } else {
       DCHECK(invoke_instruction->IsInvokeVirtual());
-      new_method = ic.GetTypeAt(i)->GetEmbeddedVTableEntry(method_index, pointer_size);
+      new_method = classes->Get(i)->GetEmbeddedVTableEntry(method_index, pointer_size);
     }
     DCHECK(new_method != nullptr);
     if (actual_method == nullptr) {
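Note: the helpers added at the top of this file classify an inline cache purely by
which of its kIndividualCacheSize slots hold a class, relying on slots being filled
from index 0 without gaps. A self-contained model with plain pointers (the slot
count of 5 is an assumption for illustration):

    #include <array>
    #include <cstddef>

    constexpr size_t kIndividualCacheSize = 5;                    // Assumed value.
    using Cache = std::array<const void*, kIndividualCacheSize>;  // nullptr == empty slot.

    bool IsUninitialized(const Cache& c) { return c[0] == nullptr; }
    bool IsMonomorphic(const Cache& c) { return c[0] != nullptr && c[1] == nullptr; }
    bool IsPolymorphic(const Cache& c) {
      return c[1] != nullptr && c[kIndividualCacheSize - 1] == nullptr;
    }
    bool IsMegamorphic(const Cache& c) {
      for (const void* klass : c) {
        if (klass == nullptr) return false;  // An empty slot means not yet megamorphic.
      }
      return true;
    }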
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 682393e..a2b4fc9 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -28,7 +28,6 @@
 class DexCompilationUnit;
 class HGraph;
 class HInvoke;
-class InlineCache;
 class OptimizingCompilerStats;
 
 class HInliner : public HOptimization {
@@ -105,18 +104,18 @@
   // ... // inlined code
   bool TryInlineMonomorphicCall(HInvoke* invoke_instruction,
                                 ArtMethod* resolved_method,
-                                const InlineCache& ic)
+                                Handle<mirror::ObjectArray<mirror::Class>> classes)
     REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Try to inline targets of a polymorphic call.
   bool TryInlinePolymorphicCall(HInvoke* invoke_instruction,
                                 ArtMethod* resolved_method,
-                                const InlineCache& ic)
+                                Handle<mirror::ObjectArray<mirror::Class>> classes)
     REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction,
                                             ArtMethod* resolved_method,
-                                            const InlineCache& ic)
+                                            Handle<mirror::ObjectArray<mirror::Class>> classes)
     REQUIRES_SHARED(Locks::mutator_lock_);
 
 
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 40de5ce..b97581b 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -2625,7 +2625,7 @@
     }
 
     case Instruction::CONST_STRING: {
-      uint32_t string_index = instruction.VRegB_21c();
+      dex::StringIndex string_index(instruction.VRegB_21c());
       AppendInstruction(
           new (arena_) HLoadString(graph_->GetCurrentMethod(), string_index, *dex_file_, dex_pc));
       UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
@@ -2633,7 +2633,7 @@
     }
 
     case Instruction::CONST_STRING_JUMBO: {
-      uint32_t string_index = instruction.VRegB_31c();
+      dex::StringIndex string_index(instruction.VRegB_31c());
       AppendInstruction(
           new (arena_) HLoadString(graph_->GetCurrentMethod(), string_index, *dex_file_, dex_pc));
       UpdateLocal(instruction.VRegA_31c(), current_block_->GetLastInstruction());
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 451abc5..17a97da 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -2165,11 +2165,11 @@
   __ Cbz(dst, slow_path->GetEntryLabel());
 
   if (!length.IsConstant()) {
-    // If the length is negative, bail out.
-    __ Tbnz(WRegisterFrom(length), kWRegSize - 1, slow_path->GetEntryLabel());
-    // If the length > 32 then (currently) prefer libcore's native implementation.
+    // Merge the following two comparisons into one:
+    //   If the length is negative, bail out (delegate to libcore's native implementation).
+    //   If the length > 32 then (currently) prefer libcore's native implementation.
     __ Cmp(WRegisterFrom(length), kSystemArrayCopyCharThreshold);
-    __ B(slow_path->GetEntryLabel(), gt);
+    __ B(slow_path->GetEntryLabel(), hi);
   } else {
     // We have already checked in the LocationsBuilder for the constant case.
     DCHECK_GE(length.GetConstant()->AsIntConstant()->GetValue(), 0);
@@ -2379,11 +2379,11 @@
   if (!length.IsConstant() &&
       !optimizations.GetCountIsSourceLength() &&
       !optimizations.GetCountIsDestinationLength()) {
-    // If the length is negative, bail out.
-    __ Tbnz(WRegisterFrom(length), kWRegSize - 1, intrinsic_slow_path->GetEntryLabel());
-    // If the length >= 128 then (currently) prefer native implementation.
+    // Merge the following two comparisons into one:
+    //   If the length is negative, bail out (delegate to libcore's native implementation).
+    //   If the length >= 128 then (currently) prefer native implementation.
     __ Cmp(WRegisterFrom(length), kSystemArrayCopyThreshold);
-    __ B(intrinsic_slow_path->GetEntryLabel(), ge);
+    __ B(intrinsic_slow_path->GetEntryLabel(), hs);
   }
   // Validity checks: source.
   CheckSystemArrayCopyPosition(masm,
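Note: the gt->hi and ge->hs changes above fold the separate negative-length check
(previously a Tbnz on the sign bit) into the threshold comparison: an unsigned
compare treats a negative 32-bit length as a huge value, so a single branch covers
both bail-out cases. A small self-contained check of the arithmetic (the threshold
value is illustrative):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t kThreshold = 32;  // Stands in for kSystemArrayCopyCharThreshold.
      // Negative lengths wrap to >= 0x80000000 when viewed unsigned, far above
      // any small threshold, so one unsigned compare rejects them as well.
      assert(static_cast<uint32_t>(int32_t{-1}) > kThreshold);    // negative: bail out
      assert(static_cast<uint32_t>(int32_t{100}) > kThreshold);   // too long: bail out
      assert(!(static_cast<uint32_t>(int32_t{7}) > kThreshold));  // fast path
      return 0;
    }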
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 15e6059..edecf17 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -15,6 +15,8 @@
  */
 
 #include "load_store_elimination.h"
+
+#include "escape.h"
 #include "side_effects_analysis.h"
 
 #include <iostream>
@@ -31,70 +33,12 @@
 // whether it's a singleton, returned, etc.
 class ReferenceInfo : public ArenaObject<kArenaAllocMisc> {
  public:
-  ReferenceInfo(HInstruction* reference, size_t pos) : reference_(reference), position_(pos) {
-    is_singleton_ = true;
-    is_singleton_and_non_escaping_ = true;
-    if (!reference_->IsNewInstance() && !reference_->IsNewArray()) {
-      // For references not allocated in the method, don't assume anything.
-      is_singleton_ = false;
-      is_singleton_and_non_escaping_ = false;
-      return;
-    }
-
-    // Visit all uses to determine if this reference can spread into the heap,
-    // a method call, etc.
-    for (const HUseListNode<HInstruction*>& use : reference_->GetUses()) {
-      HInstruction* user = use.GetUser();
-      DCHECK(!user->IsNullCheck()) << "NullCheck should have been eliminated";
-      if (user->IsBoundType()) {
-        // BoundType shouldn't normally be necessary for a NewInstance.
-        // Just be conservative for the uncommon cases.
-        is_singleton_ = false;
-        is_singleton_and_non_escaping_ = false;
-        return;
-      }
-      if (user->IsPhi() || user->IsSelect() || user->IsInvoke() ||
-          (user->IsInstanceFieldSet() && (reference_ == user->InputAt(1))) ||
-          (user->IsUnresolvedInstanceFieldSet() && (reference_ == user->InputAt(1))) ||
-          (user->IsStaticFieldSet() && (reference_ == user->InputAt(1))) ||
-          (user->IsUnresolvedStaticFieldSet() && (reference_ == user->InputAt(0))) ||
-          (user->IsArraySet() && (reference_ == user->InputAt(2)))) {
-        // reference_ is merged to HPhi/HSelect, passed to a callee, or stored to heap.
-        // reference_ isn't the only name that can refer to its value anymore.
-        is_singleton_ = false;
-        is_singleton_and_non_escaping_ = false;
-        return;
-      }
-      if ((user->IsUnresolvedInstanceFieldGet() && (reference_ == user->InputAt(0))) ||
-          (user->IsUnresolvedInstanceFieldSet() && (reference_ == user->InputAt(0)))) {
-        // The field is accessed in an unresolved way. We mark the object as a non-singleton
-        // to disable load/store optimizations on it.
-        // Note that we could optimize this case and still perform some optimizations until
-        // we hit the unresolved access, but disabling is the simplest.
-        is_singleton_ = false;
-        is_singleton_and_non_escaping_ = false;
-        return;
-      }
-      if (user->IsReturn()) {
-        is_singleton_and_non_escaping_ = false;
-      }
-    }
-
-    if (!is_singleton_ || !is_singleton_and_non_escaping_) {
-      return;
-    }
-
-    // Look at Environment uses and if it's for HDeoptimize, it's treated the same
-    // as a return which escapes at the end of executing the compiled code. We don't
-    // do store elimination for singletons that escape through HDeoptimize.
-    // Other Environment uses are fine since LSE is disabled for debuggable.
-    for (const HUseListNode<HEnvironment*>& use : reference_->GetEnvUses()) {
-      HEnvironment* user = use.GetUser();
-      if (user->GetHolder()->IsDeoptimize()) {
-        is_singleton_and_non_escaping_ = false;
-        break;
-      }
-    }
+  ReferenceInfo(HInstruction* reference, size_t pos)
+      : reference_(reference),
+        position_(pos),
+        is_singleton_(true),
+        is_singleton_and_non_escaping_(true) {
+    CalculateEscape(reference_, nullptr, &is_singleton_, &is_singleton_and_non_escaping_);
   }
 
   HInstruction* GetReference() const {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index eebc49c..165dce3 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -5698,7 +5698,7 @@
   };
 
   HLoadString(HCurrentMethod* current_method,
-              uint32_t string_index,
+              dex::StringIndex string_index,
               const DexFile& dex_file,
               uint32_t dex_pc)
       : HInstruction(SideEffectsForArchRuntimeCalls(), dex_pc),
@@ -5717,7 +5717,7 @@
 
   void SetLoadKindWithStringReference(LoadKind load_kind,
                                       const DexFile& dex_file,
-                                      uint32_t string_index) {
+                                      dex::StringIndex string_index) {
     DCHECK(HasStringReference(load_kind));
     load_data_.dex_file_ = &dex_file;
     string_index_ = string_index;
@@ -5730,7 +5730,7 @@
 
   const DexFile& GetDexFile() const;
 
-  uint32_t GetStringIndex() const {
+  dex::StringIndex GetStringIndex() const {
     DCHECK(HasStringReference(GetLoadKind()) || /* For slow paths. */ !IsInDexCache());
     return string_index_;
   }
@@ -5744,7 +5744,7 @@
 
   bool InstructionDataEquals(const HInstruction* other) const OVERRIDE;
 
-  size_t ComputeHashCode() const OVERRIDE { return string_index_; }
+  size_t ComputeHashCode() const OVERRIDE { return string_index_.index_; }
 
   // Will call the runtime if we need to load the string through
   // the dex cache and the string is not guaranteed to be there yet.
@@ -5823,7 +5823,7 @@
 
   // String index serves also as the hash code and it's also needed for slow-paths,
   // so it must not be overwritten with other load data.
-  uint32_t string_index_;
+  dex::StringIndex string_index_;
 
   union {
     const DexFile* dex_file_;            // For string reference.
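Note: for readers following the uint32_t -> dex::StringIndex migration through this
patch, the wrapper is a strong index type, roughly of this shape (a sketch, not the
real dex_file_types.h definition; it assumes the all-ones kDexNoIndex sentinel
implied by the IsValid() uses in dexdump and dexlist below):

    #include <cstdint>
    #include <limits>

    namespace dex {
    struct StringIndex {
      uint32_t index_;

      constexpr StringIndex() : index_(std::numeric_limits<uint32_t>::max()) {}  // Invalid.
      explicit constexpr StringIndex(uint32_t idx) : index_(idx) {}

      bool IsValid() const { return index_ != std::numeric_limits<uint32_t>::max(); }
      bool operator==(StringIndex other) const { return index_ == other.index_; }
    };
    }  // namespace dex

The explicit constructor is what forces call sites such as HLoadString and
StringDataByIdx to name the type, so a raw method or type index can no longer be
passed where a string index is expected.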
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index a127708..daf160a 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -267,7 +267,7 @@
   DCHECK(!load_string->IsInDexCache());
 
   const DexFile& dex_file = load_string->GetDexFile();
-  uint32_t string_index = load_string->GetStringIndex();
+  dex::StringIndex string_index = load_string->GetStringIndex();
 
   HLoadString::LoadKind desired_load_kind = HLoadString::LoadKind::kDexCacheViaMethod;
   uint64_t address = 0u;  // String or dex cache element address.
diff --git a/compiler/utils/arm/jni_macro_assembler_arm.cc b/compiler/utils/arm/jni_macro_assembler_arm.cc
index cf7a4d1..3f425df 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm.cc
@@ -594,6 +594,41 @@
   __ b(slow->Entry(), NE);
 }
 
+std::unique_ptr<JNIMacroLabel> ArmJNIMacroAssembler::CreateLabel() {
+  return std::unique_ptr<JNIMacroLabel>(new ArmJNIMacroLabel());
+}
+
+void ArmJNIMacroAssembler::Jump(JNIMacroLabel* label) {
+  CHECK(label != nullptr);
+  __ b(ArmJNIMacroLabel::Cast(label)->AsArm());
+}
+
+void ArmJNIMacroAssembler::Jump(JNIMacroLabel* label,
+                                JNIMacroUnaryCondition condition,
+                                ManagedRegister test) {
+  CHECK(label != nullptr);
+
+  arm::Condition arm_cond;
+  switch (condition) {
+    case JNIMacroUnaryCondition::kZero:
+      arm_cond = EQ;
+      break;
+    case JNIMacroUnaryCondition::kNotZero:
+      arm_cond = NE;
+      break;
+    default:
+      LOG(FATAL) << "Not implemented condition: " << static_cast<int>(condition);
+      UNREACHABLE();
+  }
+  __ cmp(test.AsArm().AsCoreRegister(), ShifterOperand(0));
+  __ b(ArmJNIMacroLabel::Cast(label)->AsArm(), arm_cond);
+}
+
+void ArmJNIMacroAssembler::Bind(JNIMacroLabel* label) {
+  CHECK(label != nullptr);
+  __ Bind(ArmJNIMacroLabel::Cast(label)->AsArm());
+}
+
 #undef __
 
 void ArmExceptionSlowPath::Emit(Assembler* sasm) {
diff --git a/compiler/utils/arm/jni_macro_assembler_arm.h b/compiler/utils/arm/jni_macro_assembler_arm.h
index 4471906..809ac8b 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm.h
+++ b/compiler/utils/arm/jni_macro_assembler_arm.h
@@ -25,6 +25,7 @@
 #include "base/enums.h"
 #include "base/macros.h"
 #include "utils/jni_macro_assembler.h"
+#include "utils/label.h"
 #include "offsets.h"
 
 namespace art {
@@ -159,10 +160,26 @@
 
   void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
 
+  // Create a new label that can be used with Jump/Bind calls.
+  std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE;
+  // Emit an unconditional jump to the label.
+  void Jump(JNIMacroLabel* label) OVERRIDE;
+  // Emit a conditional jump to the label by applying a unary condition test to the register.
+  void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) OVERRIDE;
+  // Code at this offset will serve as the target for the Jump call.
+  void Bind(JNIMacroLabel* label) OVERRIDE;
+
  private:
   std::unique_ptr<ArmAssembler> asm_;
 };
 
+class ArmJNIMacroLabel FINAL : public JNIMacroLabelCommon<ArmJNIMacroLabel, art::Label, kArm> {
+ public:
+  art::Label* AsArm() {
+    return AsPlatformLabel();
+  }
+};
+
 }  // namespace arm
 }  // namespace art
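Note: a usage sketch for the new label API (hypothetical caller; 'jni_asm' and
'reg' are assumed to be an ArmJNIMacroAssembler* and a ManagedRegister):

    std::unique_ptr<JNIMacroLabel> done = jni_asm->CreateLabel();
    // Skip the emitted block when 'reg' holds zero.
    jni_asm->Jump(done.get(), JNIMacroUnaryCondition::kZero, reg);
    //   ... code emitted only for the non-zero case ...
    jni_asm->Bind(done.get());  // The conditional jump above lands here.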
 
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
index f20ed0a..fb6f172 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -564,6 +564,8 @@
 }
 
 void ArmVIXLJNIMacroAssembler::GetCurrentThread(ManagedRegister mtr) {
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(mtr.AsArm().AsVIXLRegister());
   ___ Mov(mtr.AsArm().AsVIXLRegister(), tr);
 }
 
@@ -608,6 +610,8 @@
                                     ManagedRegister test) {
   CHECK(label != nullptr);
 
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(test.AsArm().AsVIXLRegister());
   switch (condition) {
     case JNIMacroUnaryCondition::kZero:
       ___ CompareAndBranchIfZero(test.AsArm().AsVIXLRegister(),
diff --git a/compiler/utils/atomic_method_ref_map-inl.h b/compiler/utils/atomic_method_ref_map-inl.h
new file mode 100644
index 0000000..70ea028
--- /dev/null
+++ b/compiler/utils/atomic_method_ref_map-inl.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_ATOMIC_METHOD_REF_MAP_INL_H_
+#define ART_COMPILER_UTILS_ATOMIC_METHOD_REF_MAP_INL_H_
+
+#include "atomic_method_ref_map.h"
+
+#include "dex_file-inl.h"
+
+namespace art {
+
+template <typename T>
+inline typename AtomicMethodRefMap<T>::InsertResult AtomicMethodRefMap<T>::Insert(
+    MethodReference ref,
+    const T& expected,
+    const T& desired) {
+  ElementArray* const array = GetArray(ref.dex_file);
+  if (array == nullptr) {
+    return kInsertResultInvalidDexFile;
+  }
+  return (*array)[ref.dex_method_index].CompareExchangeStrongSequentiallyConsistent(
+      expected, desired)
+      ? kInsertResultSuccess
+      : kInsertResultCASFailure;
+}
+
+template <typename T>
+inline bool AtomicMethodRefMap<T>::Get(MethodReference ref, T* out) const {
+  const ElementArray* const array = GetArray(ref.dex_file);
+  if (array == nullptr) {
+    return false;
+  }
+  *out = (*array)[ref.dex_method_index].LoadRelaxed();
+  return true;
+}
+
+template <typename T>
+inline void AtomicMethodRefMap<T>::AddDexFile(const DexFile* dex_file) {
+  arrays_.Put(dex_file, ElementArray(dex_file->NumMethodIds()));
+}
+
+template <typename T>
+inline typename AtomicMethodRefMap<T>::ElementArray* AtomicMethodRefMap<T>::GetArray(
+    const DexFile* dex_file) {
+  auto it = arrays_.find(dex_file);
+  return (it != arrays_.end()) ? &it->second : nullptr;
+}
+
+template <typename T>
+inline const typename AtomicMethodRefMap<T>::ElementArray* AtomicMethodRefMap<T>::GetArray(
+    const DexFile* dex_file) const {
+  auto it = arrays_.find(dex_file);
+  return (it != arrays_.end()) ? &it->second : nullptr;
+}
+
+template <typename T> template <typename Visitor>
+inline void AtomicMethodRefMap<T>::Visit(const Visitor& visitor) {
+  for (auto& pair : arrays_) {
+    const DexFile* dex_file = pair.first;
+    const ElementArray& elements = pair.second;
+    for (size_t i = 0; i < elements.size(); ++i) {
+      visitor(MethodReference(dex_file, i), elements[i].LoadRelaxed());
+    }
+  }
+}
+
+}  // namespace art
+
+#endif  // ART_COMPILER_UTILS_ATOMIC_METHOD_REF_MAP_INL_H_
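Note: Insert() above is a single compare-and-swap against a preallocated slot,
which is what lets the map be used without locks once AddDexFile() has run. The
same semantics in a self-contained form with std::atomic (values are illustrative):

    #include <atomic>
    #include <cassert>

    int main() {
      std::atomic<int> slot(0);  // One element of a per-dex-file array.
      int expected = 0;
      // Succeeds only while the slot still holds the expected (default) value:
      // this corresponds to kInsertResultSuccess.
      assert(slot.compare_exchange_strong(expected, 44));
      assert(slot.load(std::memory_order_relaxed) == 44);
      // A stale 'expected' makes the CAS fail: kInsertResultCASFailure.
      expected = 0;
      assert(!slot.compare_exchange_strong(expected, 45));
      return 0;
    }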
diff --git a/compiler/utils/atomic_method_ref_map.h b/compiler/utils/atomic_method_ref_map.h
new file mode 100644
index 0000000..f0db231
--- /dev/null
+++ b/compiler/utils/atomic_method_ref_map.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_ATOMIC_METHOD_REF_MAP_H_
+#define ART_COMPILER_UTILS_ATOMIC_METHOD_REF_MAP_H_
+
+#include "base/dchecked_vector.h"
+#include "method_reference.h"
+#include "safe_map.h"
+
+namespace art {
+
+class DexFile;
+
+// Used by CompilerCallbacks to track verification information from the Runtime.
+template <typename T>
+class AtomicMethodRefMap {
+ public:
+  AtomicMethodRefMap() {}
+  ~AtomicMethodRefMap() {}
+
+  // Atomically swap the element in if the existing value matches expected.
+  enum InsertResult {
+    kInsertResultInvalidDexFile,
+    kInsertResultCASFailure,
+    kInsertResultSuccess,
+  };
+  InsertResult Insert(MethodReference ref, const T& expected, const T& desired);
+
+  // Retrieve an item; returns false if the dex file has not been added.
+  bool Get(MethodReference ref, T* out) const;
+
+  // Dex files must be added before method references belonging to them can be used as keys. Not
+  // thread safe.
+  void AddDexFile(const DexFile* dex_file);
+
+  // Visit all of the dex files and elements.
+  template <typename Visitor>
+  void Visit(const Visitor& visitor);
+
+ private:
+  // One element array per dex file. Each array is fixed in size when added,
+  // to avoid needing a lock to extend it.
+  using ElementArray = dchecked_vector<Atomic<T>>;
+  using DexFileArrays = SafeMap<const DexFile*, ElementArray>;
+
+  const ElementArray* GetArray(const DexFile* dex_file) const;
+  ElementArray* GetArray(const DexFile* dex_file);
+
+  DexFileArrays arrays_;
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_UTILS_ATOMIC_METHOD_REF_MAP_H_
diff --git a/compiler/utils/atomic_method_ref_map_test.cc b/compiler/utils/atomic_method_ref_map_test.cc
new file mode 100644
index 0000000..c3e48ff
--- /dev/null
+++ b/compiler/utils/atomic_method_ref_map_test.cc
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "atomic_method_ref_map-inl.h"
+
+#include <memory>
+
+#include "common_runtime_test.h"
+#include "dex_file-inl.h"
+#include "method_reference.h"
+#include "scoped_thread_state_change-inl.h"
+
+namespace art {
+
+class AtomicMethodRefMapTest : public CommonRuntimeTest {};
+
+TEST_F(AtomicMethodRefMapTest, RunTests) {
+  ScopedObjectAccess soa(Thread::Current());
+  std::unique_ptr<const DexFile> dex(OpenTestDexFile("Interfaces"));
+  ASSERT_TRUE(dex != nullptr);
+  using Map = AtomicMethodRefMap<int>;
+  Map map;
+  int value = 123;
+  // Error case: the dex file has not been added yet, so the get fails.
+  EXPECT_FALSE(map.Get(MethodReference(dex.get(), 1), &value));
+  // Error case: Dex file not registered.
+  EXPECT_TRUE(map.Insert(MethodReference(dex.get(), 1), 0, 1) == Map::kInsertResultInvalidDexFile);
+  map.AddDexFile(dex.get());
+  EXPECT_GT(dex->NumMethodIds(), 10u);
+  // After the dex file has been added, the get should succeed but return the default value.
+  EXPECT_TRUE(map.Get(MethodReference(dex.get(), 1), &value));
+  EXPECT_EQ(value, 0);
+  // Actually insert an item and make sure we can retrieve it.
+  static const int kInsertValue = 44;
+  EXPECT_TRUE(map.Insert(MethodReference(dex.get(), 1), 0, kInsertValue) ==
+              Map::kInsertResultSuccess);
+  EXPECT_TRUE(map.Get(MethodReference(dex.get(), 1), &value));
+  EXPECT_EQ(value, kInsertValue);
+  static const int kInsertValue2 = 123;
+  EXPECT_TRUE(map.Insert(MethodReference(dex.get(), 2), 0, kInsertValue2) ==
+              Map::kInsertResultSuccess);
+  EXPECT_TRUE(map.Get(MethodReference(dex.get(), 1), &value));
+  EXPECT_EQ(value, kInsertValue);
+  EXPECT_TRUE(map.Get(MethodReference(dex.get(), 2), &value));
+  EXPECT_EQ(value, kInsertValue2);
+  // Error case: Incorrect expected value for CAS.
+  EXPECT_TRUE(map.Insert(MethodReference(dex.get(), 1), 0, kInsertValue + 1) ==
+      Map::kInsertResultCASFailure);
+  // Correctly overwrite the value and verify.
+  EXPECT_TRUE(map.Insert(MethodReference(dex.get(), 1), kInsertValue, kInsertValue + 1) ==
+      Map::kInsertResultSuccess);
+  EXPECT_TRUE(map.Get(MethodReference(dex.get(), 1), &value));
+  EXPECT_EQ(value, kInsertValue + 1);
+}
+
+}  // namespace art
diff --git a/compiler/utils/string_reference_test.cc b/compiler/utils/string_reference_test.cc
index 0fd9e5b..90335eb 100644
--- a/compiler/utils/string_reference_test.cc
+++ b/compiler/utils/string_reference_test.cc
@@ -18,6 +18,7 @@
 
 #include <memory>
 
+#include "dex_file_types.h"
 #include "gtest/gtest.h"
 #include "utils/test_dex_file_builder.h"
 
@@ -34,15 +35,15 @@
   builder1.AddString("String1");
   std::unique_ptr<const DexFile> dex_file1 = builder1.Build("dummy location 1");
   ASSERT_EQ(1u, dex_file1->NumStringIds());
-  ASSERT_STREQ("String1", dex_file1->GetStringData(dex_file1->GetStringId(0)));
-  StringReference sr1(dex_file1.get(), 0);
+  ASSERT_STREQ("String1", dex_file1->GetStringData(dex_file1->GetStringId(dex::StringIndex(0))));
+  StringReference sr1(dex_file1.get(), dex::StringIndex(0));
 
   TestDexFileBuilder builder2;
   builder2.AddString("String2");
   std::unique_ptr<const DexFile> dex_file2 = builder2.Build("dummy location 2");
   ASSERT_EQ(1u, dex_file2->NumStringIds());
-  ASSERT_STREQ("String2", dex_file2->GetStringData(dex_file2->GetStringId(0)));
-  StringReference sr2(dex_file2.get(), 0);
+  ASSERT_STREQ("String2", dex_file2->GetStringData(dex_file2->GetStringId(dex::StringIndex(0))));
+  StringReference sr2(dex_file2.get(), dex::StringIndex(0));
 
   StringReferenceValueComparator cmp;
   EXPECT_TRUE(cmp(sr1, sr2));  // "String1" < "String2" is true.
@@ -80,7 +81,8 @@
   std::unique_ptr<const DexFile> dex_file1 = builder1.Build("dummy location 1");
   ASSERT_EQ(arraysize(kDexFile1Strings), dex_file1->NumStringIds());
   for (size_t index = 0; index != arraysize(kDexFile1Strings); ++index) {
-    ASSERT_STREQ(kDexFile1Strings[index], dex_file1->GetStringData(dex_file1->GetStringId(index)));
+    ASSERT_STREQ(kDexFile1Strings[index],
+                 dex_file1->GetStringData(dex_file1->GetStringId(dex::StringIndex(index))));
   }
 
   TestDexFileBuilder builder2;
@@ -90,14 +92,15 @@
   std::unique_ptr<const DexFile> dex_file2 = builder2.Build("dummy location 1");
   ASSERT_EQ(arraysize(kDexFile2Strings), dex_file2->NumStringIds());
   for (size_t index = 0; index != arraysize(kDexFile2Strings); ++index) {
-    ASSERT_STREQ(kDexFile2Strings[index], dex_file2->GetStringData(dex_file2->GetStringId(index)));
+    ASSERT_STREQ(kDexFile2Strings[index],
+                 dex_file2->GetStringData(dex_file2->GetStringId(dex::StringIndex(index))));
   }
 
   StringReferenceValueComparator cmp;
   for (size_t index1 = 0; index1 != arraysize(kDexFile1Strings); ++index1) {
     for (size_t index2 = 0; index2 != arraysize(kDexFile2Strings); ++index2) {
-      StringReference sr1(dex_file1.get(), index1);
-      StringReference sr2(dex_file2.get(), index2);
+      StringReference sr1(dex_file1.get(), dex::StringIndex(index1));
+      StringReference sr2(dex_file2.get(), dex::StringIndex(index2));
       EXPECT_EQ(expectedCmp12[index1][index2], cmp(sr1, sr2)) << index1 << " " << index2;
       EXPECT_EQ(expectedCmp21[index2][index1], cmp(sr2, sr1)) << index1 << " " << index2;
     }
diff --git a/compiler/utils/test_dex_file_builder_test.cc b/compiler/utils/test_dex_file_builder_test.cc
index 922f8b1..c76739b 100644
--- a/compiler/utils/test_dex_file_builder_test.cc
+++ b/compiler/utils/test_dex_file_builder_test.cc
@@ -49,7 +49,8 @@
   };
   ASSERT_EQ(arraysize(expected_strings), dex_file->NumStringIds());
   for (size_t i = 0; i != arraysize(expected_strings); ++i) {
-    EXPECT_STREQ(expected_strings[i], dex_file->GetStringData(dex_file->GetStringId(i))) << i;
+    EXPECT_STREQ(expected_strings[i],
+                 dex_file->GetStringData(dex_file->GetStringId(dex::StringIndex(i)))) << i;
   }
 
   static const char* const expected_types[] = {
diff --git a/compiler/verifier_deps_test.cc b/compiler/verifier_deps_test.cc
index 525a2ee..90fe6da 100644
--- a/compiler/verifier_deps_test.cc
+++ b/compiler/verifier_deps_test.cc
@@ -460,20 +460,20 @@
   ScopedObjectAccess soa(Thread::Current());
   LoadDexFile(&soa);
 
-  uint32_t id_Main1 = verifier_deps_->GetIdFromString(*primary_dex_file_, "LMain;");
-  ASSERT_LT(id_Main1, primary_dex_file_->NumStringIds());
+  dex::StringIndex id_Main1 = verifier_deps_->GetIdFromString(*primary_dex_file_, "LMain;");
+  ASSERT_LT(id_Main1.index_, primary_dex_file_->NumStringIds());
   ASSERT_EQ("LMain;", verifier_deps_->GetStringFromId(*primary_dex_file_, id_Main1));
 
-  uint32_t id_Main2 = verifier_deps_->GetIdFromString(*primary_dex_file_, "LMain;");
-  ASSERT_LT(id_Main2, primary_dex_file_->NumStringIds());
+  dex::StringIndex id_Main2 = verifier_deps_->GetIdFromString(*primary_dex_file_, "LMain;");
+  ASSERT_LT(id_Main2.index_, primary_dex_file_->NumStringIds());
   ASSERT_EQ("LMain;", verifier_deps_->GetStringFromId(*primary_dex_file_, id_Main2));
 
-  uint32_t id_Lorem1 = verifier_deps_->GetIdFromString(*primary_dex_file_, "Lorem ipsum");
-  ASSERT_GE(id_Lorem1, primary_dex_file_->NumStringIds());
+  dex::StringIndex id_Lorem1 = verifier_deps_->GetIdFromString(*primary_dex_file_, "Lorem ipsum");
+  ASSERT_GE(id_Lorem1.index_, primary_dex_file_->NumStringIds());
   ASSERT_EQ("Lorem ipsum", verifier_deps_->GetStringFromId(*primary_dex_file_, id_Lorem1));
 
-  uint32_t id_Lorem2 = verifier_deps_->GetIdFromString(*primary_dex_file_, "Lorem ipsum");
-  ASSERT_GE(id_Lorem2, primary_dex_file_->NumStringIds());
+  dex::StringIndex id_Lorem2 = verifier_deps_->GetIdFromString(*primary_dex_file_, "Lorem ipsum");
+  ASSERT_GE(id_Lorem2.index_, primary_dex_file_->NumStringIds());
   ASSERT_EQ("Lorem ipsum", verifier_deps_->GetStringFromId(*primary_dex_file_, id_Lorem2));
 
   ASSERT_EQ(id_Main1, id_Main2);
@@ -1306,9 +1306,10 @@
     bool found = false;
     for (const auto& entry : deps->fields_) {
       if (!entry.IsResolved()) {
+        constexpr dex::StringIndex kStringIndexZero(0);  // We know there is a class there.
         deps->fields_.insert(VerifierDeps::FieldResolution(0 /* we know there is a field there */,
                                                            VerifierDeps::kUnresolvedMarker - 1,
-                                                           0  /* we know there is a class there */));
+                                                           kStringIndexZero));
         found = true;
         break;
       }
@@ -1341,7 +1342,7 @@
     VerifierDeps::DexFileDeps* deps = decoded_deps.GetDexFileDeps(*primary_dex_file_);
     bool found = false;
     for (const auto& entry : deps->fields_) {
-      static constexpr uint32_t kNewTypeIndex = 0;
+      constexpr dex::StringIndex kNewTypeIndex(0);
       if (entry.GetDeclaringClassIndex() != kNewTypeIndex) {
         deps->fields_.insert(VerifierDeps::FieldResolution(entry.GetDexFieldIndex(),
                                                            entry.GetAccessFlags(),
@@ -1384,9 +1385,10 @@
       std::set<VerifierDeps::MethodResolution>* methods = GetMethods(deps, resolution_kind);
       for (const auto& entry : *methods) {
         if (!entry.IsResolved()) {
+          constexpr dex::StringIndex kStringIndexZero(0);  // We know there is a class there.
           methods->insert(VerifierDeps::MethodResolution(0 /* we know there is a method there */,
                                                          VerifierDeps::kUnresolvedMarker - 1,
-                                                         0  /* we know there is a class there */));
+                                                         kStringIndexZero));
           found = true;
           break;
         }
@@ -1421,7 +1423,7 @@
       bool found = false;
       std::set<VerifierDeps::MethodResolution>* methods = GetMethods(deps, resolution_kind);
       for (const auto& entry : *methods) {
-        static constexpr uint32_t kNewTypeIndex = 0;
+        constexpr dex::StringIndex kNewTypeIndex(0);
         if (entry.IsResolved() && entry.GetDeclaringClassIndex() != kNewTypeIndex) {
           methods->insert(VerifierDeps::MethodResolution(entry.GetDexMethodIndex(),
                                                          entry.GetAccessFlags(),
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 9e6032f..91a32f9 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1636,7 +1636,7 @@
                                         soa.Decode<mirror::ClassLoader>(class_loader_).Ptr())));
       // Pre-register dex files so that we can access verification results without locks during
       // compilation and verification.
-      verification_results_->PreRegisterDexFile(dex_file);
+      verification_results_->AddDexFile(dex_file);
     }
 
     return true;
diff --git a/dexdump/dexdump.cc b/dexdump/dexdump.cc
index 03d6227..916984c 100644
--- a/dexdump/dexdump.cc
+++ b/dexdump/dexdump.cc
@@ -475,9 +475,9 @@
     case DexFile::kDexAnnotationString: {
       const u4 idx = static_cast<u4>(readVarWidth(data, arg, false));
       if (gOptions.outputFormat == OUTPUT_PLAIN) {
-        dumpEscapedString(pDexFile->StringDataByIdx(idx));
+        dumpEscapedString(pDexFile->StringDataByIdx(dex::StringIndex(idx)));
       } else {
-        dumpXmlAttribute(pDexFile->StringDataByIdx(idx));
+        dumpXmlAttribute(pDexFile->StringDataByIdx(dex::StringIndex(idx)));
       }
       break;
     }
@@ -518,7 +518,7 @@
       for (u4 i = 0; i < size; i++) {
         const u4 name_idx = DecodeUnsignedLeb128(data);
         fputc(' ', gOutFile);
-        fputs(pDexFile->StringDataByIdx(name_idx), gOutFile);
+        fputs(pDexFile->StringDataByIdx(dex::StringIndex(name_idx)), gOutFile);
         fputc('=', gOutFile);
         dumpEncodedValue(pDexFile, data);
       }
@@ -599,7 +599,7 @@
   fprintf(gOutFile, "superclass_idx      : %d\n", pClassDef.superclass_idx_.index_);
   fprintf(gOutFile, "interfaces_off      : %d (0x%06x)\n",
           pClassDef.interfaces_off_, pClassDef.interfaces_off_);
-  fprintf(gOutFile, "source_file_idx     : %d\n", pClassDef.source_file_idx_);
+  fprintf(gOutFile, "source_file_idx     : %d\n", pClassDef.source_file_idx_.index_);
   fprintf(gOutFile, "annotations_off     : %d (0x%06x)\n",
           pClassDef.annotations_off_, pClassDef.annotations_off_);
   fprintf(gOutFile, "class_data_off      : %d (0x%06x)\n",
@@ -842,7 +842,7 @@
       break;
     case Instruction::kIndexStringRef:
       if (index < pDexFile->GetHeader().string_ids_size_) {
-        const char* st = pDexFile->StringDataByIdx(index);
+        const char* st = pDexFile->StringDataByIdx(dex::StringIndex(index));
         outSize = snprintf(buf.get(), bufSize, "\"%s\" // string@%0*x", st, width, index);
       } else {
         outSize = snprintf(buf.get(), bufSize, "<string?> // string@%0*x", width, index);
@@ -1564,13 +1564,13 @@
   // End of class.
   if (gOptions.outputFormat == OUTPUT_PLAIN) {
     const char* fileName;
-    if (pClassDef.source_file_idx_ != DexFile::kDexNoIndex) {
+    if (pClassDef.source_file_idx_.IsValid()) {
       fileName = pDexFile->StringDataByIdx(pClassDef.source_file_idx_);
     } else {
       fileName = "unknown";
     }
     fprintf(gOutFile, "  source_file_idx   : %d (%s)\n\n",
-            pClassDef.source_file_idx_, fileName);
+            pClassDef.source_file_idx_.index_, fileName);
   } else if (gOptions.outputFormat == OUTPUT_XML) {
     fprintf(gOutFile, "</class>\n");
   }
diff --git a/dexlayout/dex_ir.cc b/dexlayout/dex_ir.cc
index fe2bcce..b1e66be 100644
--- a/dexlayout/dex_ir.cc
+++ b/dexlayout/dex_ir.cc
@@ -340,7 +340,7 @@
 }
 
 void Collections::CreateStringId(const DexFile& dex_file, uint32_t i) {
-  const DexFile::StringId& disk_string_id = dex_file.GetStringId(i);
+  const DexFile::StringId& disk_string_id = dex_file.GetStringId(dex::StringIndex(i));
   StringData* string_data = new StringData(dex_file.GetStringData(disk_string_id));
   string_datas_.AddItem(string_data, disk_string_id.string_data_off_);
 
@@ -350,7 +350,7 @@
 
 void Collections::CreateTypeId(const DexFile& dex_file, uint32_t i) {
   const DexFile::TypeId& disk_type_id = dex_file.GetTypeId(dex::TypeIndex(i));
-  TypeId* type_id = new TypeId(GetStringId(disk_type_id.descriptor_idx_));
+  TypeId* type_id = new TypeId(GetStringId(disk_type_id.descriptor_idx_.index_));
   type_ids_.AddIndexedItem(type_id, TypeIdsOffset() + i * TypeId::ItemSize(), i);
 }
 
@@ -359,7 +359,7 @@
   const DexFile::TypeList* type_list = dex_file.GetProtoParameters(disk_proto_id);
   TypeList* parameter_type_list = CreateTypeList(type_list, disk_proto_id.parameters_off_);
 
-  ProtoId* proto_id = new ProtoId(GetStringId(disk_proto_id.shorty_idx_),
+  ProtoId* proto_id = new ProtoId(GetStringId(disk_proto_id.shorty_idx_.index_),
                                   GetTypeId(disk_proto_id.return_type_idx_.index_),
                                   parameter_type_list);
   proto_ids_.AddIndexedItem(proto_id, ProtoIdsOffset() + i * ProtoId::ItemSize(), i);
@@ -369,7 +369,7 @@
   const DexFile::FieldId& disk_field_id = dex_file.GetFieldId(i);
   FieldId* field_id = new FieldId(GetTypeId(disk_field_id.class_idx_.index_),
                                   GetTypeId(disk_field_id.type_idx_.index_),
-                                  GetStringId(disk_field_id.name_idx_));
+                                  GetStringId(disk_field_id.name_idx_.index_));
   field_ids_.AddIndexedItem(field_id, FieldIdsOffset() + i * FieldId::ItemSize(), i);
 }
 
@@ -377,7 +377,7 @@
   const DexFile::MethodId& disk_method_id = dex_file.GetMethodId(i);
   MethodId* method_id = new MethodId(GetTypeId(disk_method_id.class_idx_.index_),
                                      GetProtoId(disk_method_id.proto_idx_),
-                                     GetStringId(disk_method_id.name_idx_));
+                                     GetStringId(disk_method_id.name_idx_.index_));
   method_ids_.AddIndexedItem(method_id, MethodIdsOffset() + i * MethodId::ItemSize(), i);
 }
 
@@ -390,7 +390,7 @@
   const DexFile::TypeList* type_list = dex_file.GetInterfacesList(disk_class_def);
   TypeList* interfaces_type_list = CreateTypeList(type_list, disk_class_def.interfaces_off_);
 
-  const StringId* source_file = GetStringIdOrNullPtr(disk_class_def.source_file_idx_);
+  const StringId* source_file = GetStringIdOrNullPtr(disk_class_def.source_file_idx_.index_);
   // Annotations.
   AnnotationsDirectoryItem* annotations = nullptr;
   const DexFile::AnnotationsDirectoryItem* disk_annotations_directory_item =
diff --git a/dexlist/dexlist.cc b/dexlist/dexlist.cc
index 68473c4..efe1aad 100644
--- a/dexlist/dexlist.cc
+++ b/dexlist/dexlist.cc
@@ -140,7 +140,7 @@
   const DexFile::ClassDef& pClassDef = pDexFile->GetClassDef(idx);
 
   const char* fileName;
-  if (pClassDef.source_file_idx_ == DexFile::kDexNoIndex) {
+  if (!pClassDef.source_file_idx_.IsValid()) {
     fileName = nullptr;
   } else {
     fileName = pDexFile->StringDataByIdx(pClassDef.source_file_idx_);
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 3ad0f1e..a1984a7 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -703,13 +703,13 @@
         const Instruction* inst = Instruction::At(code_ptr);
         switch (inst->Opcode()) {
           case Instruction::CONST_STRING: {
-            const uint32_t string_index = inst->VRegB_21c();
+            const dex::StringIndex string_index(inst->VRegB_21c());
             unique_string_ids_from_code_.insert(StringReference(&dex_file, string_index));
             ++num_string_ids_from_code_;
             break;
           }
           case Instruction::CONST_STRING_JUMBO: {
-            const uint32_t string_index = inst->VRegB_31c();
+            const dex::StringIndex string_index(inst->VRegB_31c());
             unique_string_ids_from_code_.insert(StringReference(&dex_file, string_index));
             ++num_string_ids_from_code_;
             break;
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 6a442a5..5c56923 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -71,7 +71,7 @@
   jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
 
   // Alloc
-  ResetQuickAllocEntryPoints(qpoints);
+  ResetQuickAllocEntryPoints(qpoints, /*is_marking*/ false);
 
   // Cast
   qpoints->pInstanceofNonTrivial = artInstanceOfFromCode;
diff --git a/runtime/arch/quick_alloc_entrypoints.S b/runtime/arch/quick_alloc_entrypoints.S
index fa86bf4..db2fdca 100644
--- a/runtime/arch/quick_alloc_entrypoints.S
+++ b/runtime/arch/quick_alloc_entrypoints.S
@@ -107,7 +107,28 @@
 GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
 .endm
 
+.macro GENERATE_ALLOC_ENTRYPOINTS_FOR_TLAB_ALLOCATOR
+// This is to be separately defined for each architecture to allow a hand-written assembly fast path.
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
+.endm
+
 .macro GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_REGION_TLAB_ALLOCATORS
+GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_TLAB_ALLOCATORS
+GENERATE_ALLOC_ENTRYPOINTS_FOR_TLAB_ALLOCATOR
+.endm
+
+.macro GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_TLAB_ALLOCATORS
 GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_dlmalloc, DlMalloc)
 GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_dlmalloc, DlMalloc)
 GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_dlmalloc, DlMalloc)
@@ -187,20 +208,6 @@
 GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_bump_pointer_instrumented, BumpPointerInstrumented)
 GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_bump_pointer_instrumented, BumpPointerInstrumented)
 
-// This is to be separately defined for each architecture to allow a hand-written assembly fast path.
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
-
 GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab_instrumented, TLABInstrumented)
 GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab_instrumented, TLABInstrumented)
 GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab_instrumented, TLABInstrumented)
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index fb405fa..6fbc954 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1085,15 +1085,12 @@
     RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER             // return or deliver exception
 END_MACRO
 
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB).
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB). May be called
+// for CC if the GC is not marking.
 DEFINE_FUNCTION art_quick_alloc_object_tlab
     // Fast path tlab allocation.
     // EAX: uint32_t type_idx/return value, ECX: ArtMethod*.
     // EBX, EDX: free.
-#if defined(USE_READ_BARRIER)
-    int3
-    int3
-#endif
     PUSH esi
     PUSH edi
     movl ART_METHOD_DEX_CACHE_TYPES_OFFSET_32(%ecx), %edx   // Load dex cache resolved types array
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 860b77e..f8066e4 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -18,6 +18,13 @@
 
 #include "arch/quick_alloc_entrypoints.S"
 
+MACRO0(ASSERT_USE_READ_BARRIER)
+#if !defined(USE_READ_BARRIER)
+    int3
+    int3
+#endif
+END_MACRO
+
 MACRO0(SETUP_FP_CALLEE_SAVE_FRAME)
     // Create space for ART FP callee-saved registers
     subq MACRO_LITERAL(4 * 8), %rsp
@@ -972,8 +979,10 @@
 END_MACRO
 
 // Generate the allocation entrypoints for each allocator.
-GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_REGION_TLAB_ALLOCATORS
+GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_TLAB_ALLOCATORS
+
 // Comment out allocators that have x86_64 specific asm.
+// Region TLAB:
 // GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
 // GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
 // GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
@@ -986,6 +995,19 @@
 GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
 GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
 GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
+// Normal TLAB:
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
 
 // A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
 DEFINE_FUNCTION art_quick_alloc_object_rosalloc
@@ -1162,16 +1184,11 @@
     RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER                    // return or deliver exception
 END_MACRO
 
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB).
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB). May be
+// called with CC if the GC is not marking.
 DEFINE_FUNCTION art_quick_alloc_object_tlab
-    // Fast path tlab allocation.
     // RDI: uint32_t type_idx, RSI: ArtMethod*
     // RDX, RCX, R8, R9: free. RAX: return val.
-#if defined(USE_READ_BARRIER)
-    int3
-    int3
-#endif
-    // Might need a special macro since rsi and edx is 32b/64b mismatched.
     movq ART_METHOD_DEX_CACHE_TYPES_OFFSET_64(%rsi), %rdx  // Load dex cache resolved types array
     // Might need to break down into multiple instructions to get the base address in a register.
                                                                // Load the class
@@ -1181,29 +1198,69 @@
     ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeTLAB
 END_FUNCTION art_quick_alloc_object_tlab
 
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB). May be
+// called with CC if the GC is not marking.
+DEFINE_FUNCTION art_quick_alloc_object_resolved_tlab
+    // RDI: mirror::Class* klass, RSI: ArtMethod*
+    // RDX, RCX, R8, R9: free. RAX: return val.
+    movq %rdi, %rdx
+    ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_tlab_slow_path
+.Lart_quick_alloc_object_resolved_tlab_slow_path:
+    ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeResolvedTLAB
+END_FUNCTION art_quick_alloc_object_resolved_tlab
+
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB).
+// May be called with CC if the GC is not marking.
+DEFINE_FUNCTION art_quick_alloc_object_initialized_tlab
+    // RDI: mirror::Class* klass, RSI: ArtMethod*
+    // RDX, RCX, R8, R9: free. RAX: return val.
+    movq %rdi, %rdx
+    ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH .Lart_quick_alloc_object_initialized_tlab_slow_path
+.Lart_quick_alloc_object_initialized_tlab_slow_path:
+    ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeInitializedTLAB
+END_FUNCTION art_quick_alloc_object_initialized_tlab
+
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_tlab, TLAB).
+DEFINE_FUNCTION art_quick_alloc_array_tlab
+    // RDI: uint32_t type_idx, RSI: int32_t component_count, RDX: ArtMethod*
+    // RCX: klass, R8, R9: free. RAX: return val.
+    movq ART_METHOD_DEX_CACHE_TYPES_OFFSET_64(%rdx), %rcx      // Load dex cache resolved types array
+    movl 0(%rcx, %rdi, COMPRESSED_REFERENCE_SIZE), %ecx        // Load the class
+    testl %ecx, %ecx
+    jz .Lart_quick_alloc_array_tlab_slow_path
+    ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED .Lart_quick_alloc_array_tlab_slow_path
+.Lart_quick_alloc_array_tlab_slow_path:
+    ALLOC_ARRAY_TLAB_SLOW_PATH artAllocArrayFromCodeTLAB
+END_FUNCTION art_quick_alloc_array_tlab
+
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB).
+DEFINE_FUNCTION art_quick_alloc_array_resolved_tlab
+    // RDI: mirror::Class* klass, RSI: int32_t component_count, RDX: ArtMethod*
+    // RCX: mirror::Class* klass, R8, R9: free. RAX: return val.
+    movq %rdi, %rcx
+    // Already resolved, no null check.
+    ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED .Lart_quick_alloc_array_resolved_tlab_slow_path
+.Lart_quick_alloc_array_resolved_tlab_slow_path:
+    ALLOC_ARRAY_TLAB_SLOW_PATH artAllocArrayFromCodeResolvedTLAB
+END_FUNCTION art_quick_alloc_array_resolved_tlab
+
 // A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region_tlab, RegionTLAB).
 DEFINE_FUNCTION art_quick_alloc_array_region_tlab
     // Fast path region tlab allocation.
     // RDI: uint32_t type_idx, RSI: int32_t component_count, RDX: ArtMethod*
     // RCX: klass, R8, R9: free. RAX: return val.
-#if !defined(USE_READ_BARRIER)
-    int3
-    int3
-#endif
+    ASSERT_USE_READ_BARRIER
     movq ART_METHOD_DEX_CACHE_TYPES_OFFSET_64(%rdx), %rcx      // Load dex cache resolved types array
     movl 0(%rcx, %rdi, COMPRESSED_REFERENCE_SIZE), %ecx        // Load the class
     // Null check so that we can load the lock word.
     testl %ecx, %ecx
     jz .Lart_quick_alloc_array_region_tlab_slow_path
-
-    cmpl LITERAL(0), %gs:THREAD_IS_GC_MARKING_OFFSET
-    jne .Lart_quick_alloc_array_region_tlab_class_load_read_barrier_marking
+    // Since we have allocation entrypoint switching, we know the GC is marking.
+    // Check the mark bit; if it is 0, do the read barrier mark.
+    testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(%ecx)
+    jz .Lart_quick_alloc_array_region_tlab_class_load_read_barrier_slow_path
 .Lart_quick_alloc_array_region_tlab_class_load_read_barrier_slow_path_exit:
     ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED .Lart_quick_alloc_array_region_tlab_slow_path
-.Lart_quick_alloc_array_region_tlab_class_load_read_barrier_marking:
-    // Check the mark bit, if it is 1 return.
-    testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(%ecx)
-    jnz .Lart_quick_alloc_array_region_tlab_class_load_read_barrier_slow_path_exit
 .Lart_quick_alloc_array_region_tlab_class_load_read_barrier_slow_path:
     // The read barrier slow path. Mark the class.
     PUSH rdi
@@ -1226,33 +1283,11 @@
     // Fast path region tlab allocation.
     // RDI: mirror::Class* klass, RSI: int32_t component_count, RDX: ArtMethod*
     // RCX: mirror::Class* klass, R8, R9: free. RAX: return val.
-#if !defined(USE_READ_BARRIER)
-    int3
-    int3
-#endif
+    ASSERT_USE_READ_BARRIER
     movq %rdi, %rcx
+    // Caller is responsible for read barrier.
     // Already resolved, no null check.
-    cmpl LITERAL(0), %gs:THREAD_IS_GC_MARKING_OFFSET
-    jne .Lart_quick_alloc_array_resolved_region_tlab_class_load_read_barrier_marking
-.Lart_quick_alloc_array_resolved_region_tlab_class_load_read_barrier_slow_path_exit:
     ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED .Lart_quick_alloc_array_resolved_region_tlab_slow_path
-.Lart_quick_alloc_array_resolved_region_tlab_class_load_read_barrier_marking:
-    // Check the mark bit, if it is 1 return.
-    testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(%ecx)
-    jnz .Lart_quick_alloc_array_region_tlab_class_load_read_barrier_slow_path_exit
-.Lart_quick_alloc_array_resolved_region_tlab_class_load_read_barrier_slow_path:
-    // The read barrier slow path. Mark the class.
-    PUSH rdi
-    PUSH rsi
-    PUSH rdx
-    // Outgoing argument set up
-    movq %rcx, %rdi                                            // Pass the class as the first param.
-    call SYMBOL(artReadBarrierMark)                            // cxx_name(mirror::Object* obj)
-    movq %rax, %rcx
-    POP rdx
-    POP rsi
-    POP rdi
-    jmp .Lart_quick_alloc_array_resolved_region_tlab_class_load_read_barrier_slow_path_exit
 .Lart_quick_alloc_array_resolved_region_tlab_slow_path:
     ALLOC_ARRAY_TLAB_SLOW_PATH artAllocArrayFromCodeResolvedRegionTLAB
 END_FUNCTION art_quick_alloc_array_resolved_region_tlab
@@ -1262,24 +1297,19 @@
     // Fast path region tlab allocation.
     // RDI: uint32_t type_idx, RSI: ArtMethod*
     // RDX, RCX, R8, R9: free. RAX: return val.
-#if !defined(USE_READ_BARRIER)
-    int3
-    int3
-#endif
+    ASSERT_USE_READ_BARRIER
     movq ART_METHOD_DEX_CACHE_TYPES_OFFSET_64(%rsi), %rdx  // Load dex cache resolved types array
     movl 0(%rdx, %rdi, COMPRESSED_REFERENCE_SIZE), %edx    // Load the class
     // Null check so that we can load the lock word.
     testl %edx, %edx
     jz .Lart_quick_alloc_object_region_tlab_slow_path
-    // Test if the GC is marking.
-    cmpl LITERAL(0), %gs:THREAD_IS_GC_MARKING_OFFSET
-    jne .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_marking
-.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit:
-    ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_region_tlab_slow_path
-.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_marking:
-    // Check the mark bit, if it is 1 avoid the read barrier.
+    // Since we have allocation entrypoint switching, we know the GC is marking.
+    // Check the mark bit; if it is 0, do the read barrier mark.
     testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(%edx)
-    jnz .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
+    jz .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path
+.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit:
+    // Use the resolved fast path since we already did the null check.
+    ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_region_tlab_slow_path
 .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path:
     // The read barrier slow path. Mark the class.
     PUSH rdi
@@ -1302,10 +1332,7 @@
     // Fast path region tlab allocation.
     // RDI: mirror::Class* klass, RSI: ArtMethod*
     // RDX, RCX, R8, R9: free. RAX: return val.
-#if !defined(USE_READ_BARRIER)
-    int3
-    int3
-#endif
+    ASSERT_USE_READ_BARRIER
     // No read barrier since the caller is responsible for that.
     movq %rdi, %rdx
     ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_region_tlab_slow_path
@@ -1318,10 +1345,7 @@
     // Fast path region tlab allocation.
     // RDI: mirror::Class* klass, RSI: ArtMethod*
     // RDX, RCX, R8, R9: free. RAX: return val.
-#if !defined(USE_READ_BARRIER)
-    int3
-    int3
-#endif
+    ASSERT_USE_READ_BARRIER
     movq %rdi, %rdx
     // No read barrier since the caller is responsible for that.
     ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH .Lart_quick_alloc_object_initialized_region_tlab_slow_path
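
The rewritten fast paths above drop the THREAD_IS_GC_MARKING check: allocation entrypoint switching guarantees the region-TLAB variants only run while the CC collector is marking, so they can test the lock-word mark bit directly. A standalone C++ sketch of that decision follows; the constant and layout are illustrative stand-ins, not ART's real values (the real mask is the generated LOCK_WORD_MARK_BIT_MASK_SHIFTED).

    #include <cstdint>
    #include <cstdio>

    // Illustrative stand-in for LOCK_WORD_MARK_BIT_MASK_SHIFTED.
    constexpr uint32_t kMarkBitMaskShifted = 1u << 29;

    struct ToyObject {
      uint32_t lock_word_;  // the mark bit lives in the object's lock word
    };

    // True when the class reference may still point at from-space and the
    // read-barrier slow path (artReadBarrierMark in the asm above) must run.
    bool NeedsReadBarrierMark(const ToyObject& klass) {
      return (klass.lock_word_ & kMarkBitMaskShifted) == 0;
    }

    int main() {
      ToyObject marked{kMarkBitMaskShifted};
      ToyObject unmarked{0};
      std::printf("marked -> %d, unmarked -> %d\n",
                  NeedsReadBarrierMark(marked), NeedsReadBarrierMark(unmarked));
      return 0;
    }
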
diff --git a/runtime/art_field.cc b/runtime/art_field.cc
index 25b8ed2..a4a6e5a 100644
--- a/runtime/art_field.cc
+++ b/runtime/art_field.cc
@@ -54,7 +54,7 @@
 
 ObjPtr<mirror::String> ArtField::ResolveGetStringName(Thread* self,
                                                       const DexFile& dex_file,
-                                                      uint32_t string_idx,
+                                                      dex::StringIndex string_idx,
                                                       ObjPtr<mirror::DexCache> dex_cache) {
   StackHandleScope<1> hs(self);
   return Runtime::Current()->GetClassLinker()->ResolveString(dex_file,
diff --git a/runtime/art_field.h b/runtime/art_field.h
index cacb324..427e103 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -221,7 +221,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
   ObjPtr<mirror::String> ResolveGetStringName(Thread* self,
                                               const DexFile& dex_file,
-                                              uint32_t string_idx,
+                                              dex::StringIndex string_idx,
                                               ObjPtr<mirror::DexCache> dex_cache)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 1acb625..730a9c3 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -470,20 +470,6 @@
                     klass, this));
       interface_method->VisitRoots(visitor, pointer_size);
     }
-    // We know we don't have profiling information if the class hasn't been verified. Note
-    // that this check also ensures the IsNative call can be made, as IsNative expects a fully
-    // created class (and not a retired one).
-    if (klass->IsVerified()) {
-      // Runtime methods and native methods use the same field as the profiling info for
-      // storing their own data (jni entrypoint for native methods, and ImtConflictTable for
-      // some runtime methods).
-      if (!IsNative<kReadBarrierOption>() && !IsRuntimeMethod()) {
-        ProfilingInfo* profiling_info = GetProfilingInfo(pointer_size);
-        if (profiling_info != nullptr) {
-          profiling_info->VisitRoots(visitor);
-        }
-      }
-    }
   }
 }
 
diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc
index 6b21a56..1dca428 100644
--- a/runtime/base/logging.cc
+++ b/runtime/base/logging.cc
@@ -80,7 +80,7 @@
     gCmdLine.reset(new std::string("<unset>"));
   }
 
-#ifdef __ANDROID__
+#ifdef ART_TARGET_ANDROID
 #define INIT_LOGGING_DEFAULT_LOGGER android::base::LogdLogger()
 #else
 #define INIT_LOGGING_DEFAULT_LOGGER android::base::StderrLogger
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 81adaeb..7005c29 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -65,14 +65,15 @@
   return array_class.Ptr();
 }
 
-inline mirror::String* ClassLinker::ResolveString(uint32_t string_idx, ArtMethod* referrer) {
+inline mirror::String* ClassLinker::ResolveString(dex::StringIndex string_idx,
+                                                  ArtMethod* referrer) {
   Thread::PoisonObjectPointersIfDebug();
   ObjPtr<mirror::Class> declaring_class = referrer->GetDeclaringClass();
   // MethodVerifier refuses methods with string_idx out of bounds.
-  DCHECK_LT(string_idx, declaring_class->GetDexFile().NumStringIds());
+  DCHECK_LT(string_idx.index_, declaring_class->GetDexFile().NumStringIds());
   ObjPtr<mirror::String> string =
         mirror::StringDexCachePair::Lookup(declaring_class->GetDexCacheStrings(),
-                                           string_idx,
+                                           string_idx.index_,
                                            mirror::DexCache::kDexCacheStringCacheSize).Read();
   if (UNLIKELY(string == nullptr)) {
     StackHandleScope<1> hs(Thread::Current());
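
The inlined ResolveString above is a cache-first lookup: probe the fixed-size DexCache string array, and only fall back to the full resolution path on a miss. A minimal standalone model of that probe is sketched below; the size and helper names are hypothetical, and ART's real cache stores atomic index/pointer pairs rather than plain fields.

    #include <array>
    #include <cstdint>
    #include <string>

    constexpr size_t kStringCacheSize = 1024;  // stand-in for kDexCacheStringCacheSize

    struct CachePair {
      uint32_t index;
      const std::string* value;
    };
    using StringCache = std::array<CachePair, kStringCacheSize>;

    // Mirrors StringDexCachePair::Lookup: a slot only counts as a hit when the
    // stored index matches, since many string indices share one slot.
    const std::string* Lookup(const StringCache& cache, uint32_t string_idx) {
      const CachePair& pair = cache[string_idx % kStringCacheSize];
      return (pair.value != nullptr && pair.index == string_idx) ? pair.value : nullptr;
    }

    const std::string* ResolveString(StringCache& cache, uint32_t string_idx,
                                     const std::string& interned) {
      if (const std::string* hit = Lookup(cache, string_idx)) {
        return hit;  // fast path: already resolved, no runtime call
      }
      cache[string_idx % kStringCacheSize] = {string_idx, &interned};  // store back
      return &interned;
    }
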
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index f3aba97..0de647f 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -7485,7 +7485,7 @@
 }
 
 mirror::String* ClassLinker::ResolveString(const DexFile& dex_file,
-                                           uint32_t string_idx,
+                                           dex::StringIndex string_idx,
                                            Handle<mirror::DexCache> dex_cache) {
   DCHECK(dex_cache.Get() != nullptr);
   Thread::PoisonObjectPointersIfDebug();
@@ -7501,7 +7501,7 @@
 }
 
 mirror::String* ClassLinker::LookupString(const DexFile& dex_file,
-                                          uint32_t string_idx,
+                                          dex::StringIndex string_idx,
                                           Handle<mirror::DexCache> dex_cache) {
   DCHECK(dex_cache.Get() != nullptr);
   ObjPtr<mirror::String> resolved = dex_cache->GetResolvedString(string_idx);
@@ -7510,7 +7510,8 @@
   }
   uint32_t utf16_length;
   const char* utf8_data = dex_file.StringDataAndUtf16LengthByIdx(string_idx, &utf16_length);
-  ObjPtr<mirror::String> string = intern_table_->LookupStrong(Thread::Current(), utf16_length, utf8_data);
+  ObjPtr<mirror::String> string =
+      intern_table_->LookupStrong(Thread::Current(), utf16_length, utf8_data);
   if (string != nullptr) {
     dex_cache->SetResolvedString(string_idx, string);
   }
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 9563448..60755cd 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -237,18 +237,20 @@
   // Resolve a String with the given index from the DexFile, storing the
   // result in the DexCache. The referrer is used to identify the
   // target DexCache and ClassLoader to use for resolution.
-  mirror::String* ResolveString(uint32_t string_idx, ArtMethod* referrer)
+  mirror::String* ResolveString(dex::StringIndex string_idx, ArtMethod* referrer)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Resolve a String with the given index from the DexFile, storing the
   // result in the DexCache.
-  mirror::String* ResolveString(const DexFile& dex_file, uint32_t string_idx,
+  mirror::String* ResolveString(const DexFile& dex_file,
+                                dex::StringIndex string_idx,
                                 Handle<mirror::DexCache> dex_cache)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Find a String with the given index from the DexFile, storing the
   // result in the DexCache if found. Return null if not found.
-  mirror::String* LookupString(const DexFile& dex_file, uint32_t string_idx,
+  mirror::String* LookupString(const DexFile& dex_file,
+                               dex::StringIndex string_idx,
                                Handle<mirror::DexCache> dex_cache)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
diff --git a/runtime/dex_file-inl.h b/runtime/dex_file-inl.h
index 77a63c1..e884e39 100644
--- a/runtime/dex_file-inl.h
+++ b/runtime/dex_file-inl.h
@@ -43,9 +43,9 @@
   return GetStringDataAndUtf16Length(string_id, &ignored);
 }
 
-inline const char* DexFile::StringDataAndUtf16LengthByIdx(uint32_t idx,
+inline const char* DexFile::StringDataAndUtf16LengthByIdx(dex::StringIndex idx,
                                                           uint32_t* utf16_length) const {
-  if (idx == kDexNoIndex) {
+  if (!idx.IsValid()) {
     *utf16_length = 0;
     return nullptr;
   }
@@ -53,7 +53,7 @@
   return GetStringDataAndUtf16Length(string_id, utf16_length);
 }
 
-inline const char* DexFile::StringDataByIdx(uint32_t idx) const {
+inline const char* DexFile::StringDataByIdx(dex::StringIndex idx) const {
   uint32_t unicode_length;
   return StringDataAndUtf16LengthByIdx(idx, &unicode_length);
 }
@@ -130,8 +130,8 @@
       (RoundUp(reinterpret_cast<uintptr_t>(insns_end_), 4)) + offset;
 }
 
-static inline bool DexFileStringEquals(const DexFile* df1, uint32_t sidx1,
-                                       const DexFile* df2, uint32_t sidx2) {
+static inline bool DexFileStringEquals(const DexFile* df1, dex::StringIndex sidx1,
+                                       const DexFile* df2, dex::StringIndex sidx2) {
   uint32_t s1_len;  // Note: utf16 length != mutf8 length.
   const char* s1_data = df1->StringDataAndUtf16LengthByIdx(sidx1, &s1_len);
   uint32_t s2_len;
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index cc544fd..aa8fb38 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -45,6 +45,8 @@
 
 namespace art {
 
+static_assert(sizeof(dex::StringIndex) == sizeof(uint32_t), "StringIndex size is wrong");
+static_assert(std::is_trivially_copyable<dex::StringIndex>::value, "StringIndex not trivial");
 static_assert(sizeof(dex::TypeIndex) == sizeof(uint16_t), "TypeIndex size is wrong");
 static_assert(std::is_trivially_copyable<dex::TypeIndex>::value, "TypeIndex not trivial");
 
@@ -602,7 +604,7 @@
                                              const DexFile::TypeId& type) const {
   // Binary search MethodIds knowing that they are sorted by class_idx, name_idx then proto_idx
   const dex::TypeIndex class_idx = GetIndexForTypeId(declaring_klass);
-  const uint32_t name_idx = GetIndexForStringId(name);
+  const dex::StringIndex name_idx = GetIndexForStringId(name);
   const dex::TypeIndex type_idx = GetIndexForTypeId(type);
   int32_t lo = 0;
   int32_t hi = NumFieldIds() - 1;
@@ -637,7 +639,7 @@
                                                const DexFile::ProtoId& signature) const {
   // Binary search MethodIds knowing that they are sorted by class_idx, name_idx then proto_idx
   const dex::TypeIndex class_idx = GetIndexForTypeId(declaring_klass);
-  const uint32_t name_idx = GetIndexForStringId(name);
+  const dex::StringIndex name_idx = GetIndexForStringId(name);
   const uint16_t proto_idx = GetIndexForProtoId(signature);
   int32_t lo = 0;
   int32_t hi = NumMethodIds() - 1;
@@ -672,7 +674,7 @@
   int32_t hi = NumStringIds() - 1;
   while (hi >= lo) {
     int32_t mid = (hi + lo) / 2;
-    const DexFile::StringId& str_id = GetStringId(mid);
+    const DexFile::StringId& str_id = GetStringId(dex::StringIndex(mid));
     const char* str = GetStringData(str_id);
     int compare = CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues(string, str);
     if (compare > 0) {
@@ -711,7 +713,7 @@
   int32_t hi = NumStringIds() - 1;
   while (hi >= lo) {
     int32_t mid = (hi + lo) / 2;
-    const DexFile::StringId& str_id = GetStringId(mid);
+    const DexFile::StringId& str_id = GetStringId(dex::StringIndex(mid));
     const char* str = GetStringData(str_id);
     int compare = CompareModifiedUtf8ToUtf16AsCodePointValues(str, string, length);
     if (compare > 0) {
@@ -725,7 +727,7 @@
   return nullptr;
 }
 
-const DexFile::TypeId* DexFile::FindTypeId(uint32_t string_idx) const {
+const DexFile::TypeId* DexFile::FindTypeId(dex::StringIndex string_idx) const {
   int32_t lo = 0;
   int32_t hi = NumTypeIds() - 1;
   while (hi >= lo) {
@@ -912,7 +914,7 @@
     }
     uint32_t name_idx = DecodeUnsignedLeb128P1(&stream);
     const char* descriptor = it.GetDescriptor();
-    local_in_reg[arg_reg].name_ = StringDataByIdx(name_idx);
+    local_in_reg[arg_reg].name_ = StringDataByIdx(dex::StringIndex(name_idx));
     local_in_reg[arg_reg].descriptor_ = descriptor;
     local_in_reg[arg_reg].signature_ = nullptr;
     local_in_reg[arg_reg].start_address_ = 0;
@@ -975,10 +977,10 @@
           local_cb(context, local_in_reg[reg]);
         }
 
-        local_in_reg[reg].name_ = StringDataByIdx(name_idx);
+        local_in_reg[reg].name_ = StringDataByIdx(dex::StringIndex(name_idx));
         local_in_reg[reg].descriptor_ =
             StringByTypeIdx(dex::TypeIndex(dchecked_integral_cast<uint16_t>(descriptor_idx)));
-        local_in_reg[reg].signature_ = StringDataByIdx(signature_idx);
+        local_in_reg[reg].signature_ = StringDataByIdx(dex::StringIndex(signature_idx));
         local_in_reg[reg].start_address_ = address;
         local_in_reg[reg].reg_ = reg;
         local_in_reg[reg].is_live_ = true;
@@ -1080,7 +1082,7 @@
         break;
       case DBG_SET_FILE: {
         uint32_t name_idx = DecodeUnsignedLeb128P1(&stream);
-        entry.source_file_ = StringDataByIdx(name_idx);
+        entry.source_file_ = StringDataByIdx(dex::StringIndex(name_idx));
         break;
       }
       default: {
@@ -1482,6 +1484,11 @@
 
 namespace dex {
 
+std::ostream& operator<<(std::ostream& os, const StringIndex& index) {
+  os << "StringIndex[" << index.index_ << "]";
+  return os;
+}
+
 std::ostream& operator<<(std::ostream& os, const TypeIndex& index) {
   os << "TypeIndex[" << index.index_ << "]";
   return os;
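
The two new static_asserts are load-bearing: the raw ID structs in dex_file.h below (TypeId, FieldId, ProtoId, MethodId) are read straight out of the memory-mapped .dex file, so wrapping a field in dex::StringIndex is only safe if the wrapper has exactly uint32_t's size and is trivially copyable. A standalone sketch of the guarantee, with a hypothetical reader:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <type_traits>

    struct StringIndex {  // shape of the wrapper being asserted on
      uint32_t index_;
    };
    static_assert(sizeof(StringIndex) == sizeof(uint32_t), "must overlay a raw u32");
    static_assert(std::is_trivially_copyable<StringIndex>::value, "must be memcpy-safe");

    // Hypothetical helper: pulling an index field straight out of mapped bytes
    // is only well-defined because of the properties asserted above.
    StringIndex ReadIndexAt(const uint8_t* mapped, size_t offset) {
      StringIndex idx;
      std::memcpy(&idx, mapped + offset, sizeof(idx));
      return idx;
    }
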
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 2384eb6..250795b 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -152,7 +152,7 @@
 
   // Raw type_id_item.
   struct TypeId {
-    uint32_t descriptor_idx_;  // index into string_ids
+    dex::StringIndex descriptor_idx_;  // index into string_ids
 
    private:
     DISALLOW_COPY_AND_ASSIGN(TypeId);
@@ -160,9 +160,9 @@
 
   // Raw field_id_item.
   struct FieldId {
-    dex::TypeIndex class_idx_;  // index into type_ids_ array for defining class
-    dex::TypeIndex type_idx_;  // index into type_ids_ array for field type
-    uint32_t name_idx_;  // index into string_ids_ array for field name
+    dex::TypeIndex class_idx_;   // index into type_ids_ array for defining class
+    dex::TypeIndex type_idx_;    // index into type_ids_ array for field type
+    dex::StringIndex name_idx_;  // index into string_ids_ array for field name
 
    private:
     DISALLOW_COPY_AND_ASSIGN(FieldId);
@@ -170,10 +170,10 @@
 
   // Raw proto_id_item.
   struct ProtoId {
-    uint32_t shorty_idx_;        // index into string_ids array for shorty descriptor
+    dex::StringIndex shorty_idx_;     // index into string_ids array for shorty descriptor
     dex::TypeIndex return_type_idx_;  // index into type_ids array for return type
-    uint16_t pad_;               // padding = 0
-    uint32_t parameters_off_;    // file offset to type_list for parameter types
+    uint16_t pad_;                    // padding = 0
+    uint32_t parameters_off_;         // file offset to type_list for parameter types
 
    private:
     DISALLOW_COPY_AND_ASSIGN(ProtoId);
@@ -182,8 +182,8 @@
   // Raw method_id_item.
   struct MethodId {
     dex::TypeIndex class_idx_;   // index into type_ids_ array for defining class
-    uint16_t proto_idx_;  // index into proto_ids_ array for method prototype
-    uint32_t name_idx_;  // index into string_ids_ array for method name
+    uint16_t proto_idx_;         // index into proto_ids_ array for method prototype
+    dex::StringIndex name_idx_;  // index into string_ids_ array for method name
 
    private:
     DISALLOW_COPY_AND_ASSIGN(MethodId);
@@ -197,7 +197,7 @@
     dex::TypeIndex superclass_idx_;  // index into type_ids_ array for superclass
     uint16_t pad2_;  // padding = 0
     uint32_t interfaces_off_;  // file offset to TypeList
-    uint32_t source_file_idx_;  // index into string_ids_ for source file name
+    dex::StringIndex source_file_idx_;  // index into string_ids_ for source file name
     uint32_t annotations_off_;  // file offset to annotations_directory_item
     uint32_t class_data_off_;  // file offset to class_data_item
     uint32_t static_values_off_;  // file offset to EncodedArray
@@ -501,15 +501,15 @@
   }
 
   // Returns the StringId at the specified index.
-  const StringId& GetStringId(uint32_t idx) const {
-    DCHECK_LT(idx, NumStringIds()) << GetLocation();
-    return string_ids_[idx];
+  const StringId& GetStringId(dex::StringIndex idx) const {
+    DCHECK_LT(idx.index_, NumStringIds()) << GetLocation();
+    return string_ids_[idx.index_];
   }
 
-  uint32_t GetIndexForStringId(const StringId& string_id) const {
+  dex::StringIndex GetIndexForStringId(const StringId& string_id) const {
     CHECK_GE(&string_id, string_ids_) << GetLocation();
     CHECK_LT(&string_id, string_ids_ + header_->string_ids_size_) << GetLocation();
-    return &string_id - string_ids_;
+    return dex::StringIndex(&string_id - string_ids_);
   }
 
   int32_t GetStringLength(const StringId& string_id) const;
@@ -522,9 +522,9 @@
   const char* GetStringData(const StringId& string_id) const;
 
   // Index version of GetStringDataAndUtf16Length.
-  const char* StringDataAndUtf16LengthByIdx(uint32_t idx, uint32_t* utf16_length) const;
+  const char* StringDataAndUtf16LengthByIdx(dex::StringIndex idx, uint32_t* utf16_length) const;
 
-  const char* StringDataByIdx(uint32_t idx) const;
+  const char* StringDataByIdx(dex::StringIndex idx) const;
 
   // Looks up a string id for a given modified utf8 string.
   const StringId* FindStringId(const char* string) const;
@@ -563,7 +563,7 @@
   const char* GetTypeDescriptor(const TypeId& type_id) const;
 
   // Looks up a type for the given string index
-  const TypeId* FindTypeId(uint32_t string_idx) const;
+  const TypeId* FindTypeId(dex::StringIndex string_idx) const;
 
   // Returns the number of field identifiers in the .dex file.
   size_t NumFieldIds() const {
@@ -963,7 +963,7 @@
                                void* context) const;
 
   const char* GetSourceFile(const ClassDef& class_def) const {
-    if (class_def.source_file_idx_ == 0xffffffff) {
+    if (!class_def.source_file_idx_.IsValid()) {
       return nullptr;
     } else {
       return StringDataByIdx(class_def.source_file_idx_);
diff --git a/runtime/dex_file_annotations.cc b/runtime/dex_file_annotations.cc
index 3fe2c40..52b9f11 100644
--- a/runtime/dex_file_annotations.cc
+++ b/runtime/dex_file_annotations.cc
@@ -167,7 +167,8 @@
 
   while (size != 0) {
     uint32_t element_name_index = DecodeUnsignedLeb128(&annotation);
-    const char* element_name = dex_file.GetStringData(dex_file.GetStringId(element_name_index));
+    const char* element_name =
+        dex_file.GetStringData(dex_file.GetStringId(dex::StringIndex(element_name_index)));
     if (strcmp(name, element_name) == 0) {
       return annotation;
     }
@@ -357,7 +358,7 @@
         StackHandleScope<1> hs(self);
         Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache()));
         element_object = Runtime::Current()->GetClassLinker()->ResolveString(
-            klass->GetDexFile(), index, dex_cache);
+            klass->GetDexFile(), dex::StringIndex(index), dex_cache);
         set_object = true;
         if (element_object == nullptr) {
           return false;
@@ -592,7 +593,7 @@
   ScopedObjectAccessUnchecked soa(self);
   StackHandleScope<5> hs(self);
   uint32_t element_name_index = DecodeUnsignedLeb128(annotation);
-  const char* name = dex_file.StringDataByIdx(element_name_index);
+  const char* name = dex_file.StringDataByIdx(dex::StringIndex(element_name_index));
   Handle<mirror::String> string_name(
       hs.NewHandle(mirror::String::AllocFromModifiedUtf8(self, name)));
 
@@ -1341,7 +1342,9 @@
     case kDouble:  field->SetDouble<kTransactionActive>(field->GetDeclaringClass(), jval_.d); break;
     case kNull:    field->SetObject<kTransactionActive>(field->GetDeclaringClass(), nullptr); break;
     case kString: {
-      mirror::String* resolved = linker_->ResolveString(dex_file_, jval_.i, *dex_cache_);
+      mirror::String* resolved = linker_->ResolveString(dex_file_,
+                                                        dex::StringIndex(jval_.i),
+                                                        *dex_cache_);
       field->SetObject<kTransactionActive>(field->GetDeclaringClass(), resolved);
       break;
     }
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index f94d07b..0fec856 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -418,7 +418,7 @@
     const char* type_str = java_lang_dex_file_->StringByTypeIdx(dex::TypeIndex(i));
     const DexFile::StringId* type_str_id = java_lang_dex_file_->FindStringId(type_str);
     ASSERT_TRUE(type_str_id != nullptr);
-    uint32_t type_str_idx = java_lang_dex_file_->GetIndexForStringId(*type_str_id);
+    dex::StringIndex type_str_idx = java_lang_dex_file_->GetIndexForStringId(*type_str_id);
     const DexFile::TypeId* type_id = java_lang_dex_file_->FindTypeId(type_str_idx);
     ASSERT_EQ(type_id, java_lang_dex_file_->FindTypeId(type_str));
     ASSERT_TRUE(type_id != nullptr);
diff --git a/runtime/dex_file_types.h b/runtime/dex_file_types.h
index c6d95a1..bd779c4 100644
--- a/runtime/dex_file_types.h
+++ b/runtime/dex_file_types.h
@@ -23,12 +23,47 @@
 namespace art {
 namespace dex {
 
+class StringIndex {
+ public:
+  uint32_t index_;
+
+  constexpr StringIndex() : index_(std::numeric_limits<decltype(index_)>::max()) {}
+  explicit constexpr StringIndex(uint32_t idx) : index_(idx) {}
+
+  bool IsValid() const {
+    return index_ != std::numeric_limits<decltype(index_)>::max();
+  }
+  static StringIndex Invalid() {
+    return StringIndex(std::numeric_limits<decltype(index_)>::max());
+  }
+
+  bool operator==(const StringIndex& other) const {
+    return index_ == other.index_;
+  }
+  bool operator!=(const StringIndex& other) const {
+    return index_ != other.index_;
+  }
+  bool operator<(const StringIndex& other) const {
+    return index_ < other.index_;
+  }
+  bool operator<=(const StringIndex& other) const {
+    return index_ <= other.index_;
+  }
+  bool operator>(const StringIndex& other) const {
+    return index_ > other.index_;
+  }
+  bool operator>=(const StringIndex& other) const {
+    return index_ >= other.index_;
+  }
+};
+std::ostream& operator<<(std::ostream& os, const StringIndex& index);
+
 class TypeIndex {
  public:
   uint16_t index_;
 
-  TypeIndex() : index_(std::numeric_limits<decltype(index_)>::max()) {}
-  explicit TypeIndex(uint16_t idx) : index_(idx) {}
+  constexpr TypeIndex() : index_(std::numeric_limits<decltype(index_)>::max()) {}
+  explicit constexpr TypeIndex(uint16_t idx) : index_(idx) {}
 
   bool IsValid() const {
     return index_ != std::numeric_limits<decltype(index_)>::max();
@@ -63,6 +98,12 @@
 
 namespace std {
 
+template<> struct hash<art::dex::StringIndex> {
+  size_t operator()(const art::dex::StringIndex& index) const {
+    return hash<uint32_t>()(index.index_);
+  }
+};
+
 template<> struct hash<art::dex::TypeIndex> {
   size_t operator()(const art::dex::TypeIndex& index) const {
     return hash<uint16_t>()(index.index_);
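
Taken together, these additions give dex::StringIndex the same surface as TypeIndex: a default-constructed value is the all-ones invalid sentinel, comparisons work directly on the wrapper, and the std::hash specialization lets it key hash containers. A short usage sketch under those definitions (the include path is assumed):

    #include <unordered_set>
    #include "dex_file_types.h"  // assumed path for art::dex::StringIndex

    void StringIndexDemo() {
      art::dex::StringIndex idx(42);  // explicit ctor: no silent uint32_t mixing
      art::dex::StringIndex none;     // default-constructed == Invalid()

      if (!none.IsValid()) {
        // Replaces the old kDexNoIndex / 0xffffffff comparisons, as in
        // DexFile::GetSourceFile above.
      }
      bool before = idx < art::dex::StringIndex(43);   // relational operators
      std::unordered_set<art::dex::StringIndex> seen;  // via std::hash<StringIndex>
      seen.insert(idx);
      (void)before;
    }
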
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index ed50711..07f0fca 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -80,8 +80,8 @@
   return true;
 }
 
-const char* DexFileVerifier::CheckLoadStringByIdx(uint32_t idx, const char* error_string) {
-  if (UNLIKELY(!CheckIndex(idx, dex_file_->NumStringIds(), error_string))) {
+const char* DexFileVerifier::CheckLoadStringByIdx(dex::StringIndex idx, const char* error_string) {
+  if (UNLIKELY(!CheckIndex(idx.index_, dex_file_->NumStringIds(), error_string))) {
     return nullptr;
   }
   return dex_file_->StringDataByIdx(idx);
@@ -92,9 +92,7 @@
   if (UNLIKELY(!CheckIndex(type_idx.index_, dex_file_->NumTypeIds(), error_string))) {
     return nullptr;
   }
-  const DexFile::TypeId& type_id = dex_file_->GetTypeId(type_idx);
-  uint32_t idx = type_id.descriptor_idx_;
-  return CheckLoadStringByIdx(idx, error_string);
+  return CheckLoadStringByIdx(dex_file_->GetTypeId(type_idx).descriptor_idx_, error_string);
 }
 
 const DexFile::FieldId* DexFileVerifier::CheckLoadFieldId(uint32_t idx, const char* error_string) {
@@ -1782,7 +1780,8 @@
     const DexFile::TypeId* prev_item = reinterpret_cast<const DexFile::TypeId*>(previous_item_);
     if (UNLIKELY(prev_item->descriptor_idx_ >= item->descriptor_idx_)) {
       ErrorStringPrintf("Out-of-order type_ids: %x then %x",
-                        prev_item->descriptor_idx_, item->descriptor_idx_);
+                        prev_item->descriptor_idx_.index_,
+                        item->descriptor_idx_.index_);
       return false;
     }
   }
@@ -2500,14 +2499,15 @@
 
 static std::string GetStringOrError(const uint8_t* const begin,
                                     const DexFile::Header* const header,
-                                    uint32_t string_idx) {
+                                    dex::StringIndex string_idx) {
   // The `string_idx` is not guaranteed to be valid yet.
-  if (header->string_ids_size_ <= string_idx) {
+  if (header->string_ids_size_ <= string_idx.index_) {
     return "(error)";
   }
 
   const DexFile::StringId* string_id =
-      reinterpret_cast<const DexFile::StringId*>(begin + header->string_ids_off_) + string_idx;
+      reinterpret_cast<const DexFile::StringId*>(begin + header->string_ids_off_)
+          + string_idx.index_;
 
   // Assume that the data is OK at this point. The string data itself has already been checked.
 
@@ -2664,7 +2664,7 @@
   }
   uint32_t string_idx =
       (reinterpret_cast<const DexFile::MethodId*>(begin + header->method_ids_off_) +
-          method_index)->name_idx_;
+          method_index)->name_idx_.index_;
   if (string_idx >= header->string_ids_size_) {
     *error_msg = "String index not available for method flags verification";
     return false;
diff --git a/runtime/dex_file_verifier.h b/runtime/dex_file_verifier.h
index 19a89de..0327367 100644
--- a/runtime/dex_file_verifier.h
+++ b/runtime/dex_file_verifier.h
@@ -150,7 +150,7 @@
 
   // Load a string by (type) index. Checks whether the index is in bounds, printing the error if
   // not. If there is an error, null is returned.
-  const char* CheckLoadStringByIdx(uint32_t idx, const char* error_fmt);
+  const char* CheckLoadStringByIdx(dex::StringIndex idx, const char* error_fmt);
   const char* CheckLoadStringByTypeIdx(dex::TypeIndex type_idx, const char* error_fmt);
 
   // Load a field/method Id by index. Checks whether the index is in bounds, printing the error if
diff --git a/runtime/dex_file_verifier_test.cc b/runtime/dex_file_verifier_test.cc
index 0e0929f..f14b1d5 100644
--- a/runtime/dex_file_verifier_test.cc
+++ b/runtime/dex_file_verifier_test.cc
@@ -176,7 +176,7 @@
       "method_id_name_idx",
       [](DexFile* dex_file) {
         DexFile::MethodId* method_id = const_cast<DexFile::MethodId*>(&dex_file->GetMethodId(0));
-        method_id->name_idx_ = 0xFF;
+        method_id->name_idx_ = dex::StringIndex(0xFF);
       },
       "String index not available for method flags verification");
 }
@@ -247,7 +247,7 @@
 
   while (it.HasNextDirectMethod() || it.HasNextVirtualMethod()) {
     uint32_t method_index = it.GetMemberIndex();
-    uint32_t name_index = dex_file->GetMethodId(method_index).name_idx_;
+    dex::StringIndex name_index = dex_file->GetMethodId(method_index).name_idx_;
     const DexFile::StringId& string_id = dex_file->GetStringId(name_index);
     const char* str = dex_file->GetStringData(string_id);
     if (strcmp(name, str) == 0) {
@@ -635,7 +635,7 @@
         uint32_t method_idx;
         FindMethodData(dex_file, "foo", &method_idx);
         auto* method_id = const_cast<DexFile::MethodId*>(&dex_file->GetMethodId(method_idx));
-        method_id->name_idx_ = dex_file->NumStringIds();
+        method_id->name_idx_ = dex::StringIndex(dex_file->NumStringIds());
       },
       "Method may have only one of public/protected/private, LMethodFlags;.(error)");
 }
@@ -856,7 +856,7 @@
 
   while (it.HasNextStaticField() || it.HasNextInstanceField()) {
     uint32_t field_index = it.GetMemberIndex();
-    uint32_t name_index = dex_file->GetFieldId(field_index).name_idx_;
+    dex::StringIndex name_index = dex_file->GetFieldId(field_index).name_idx_;
     const DexFile::StringId& string_id = dex_file->GetStringId(name_index);
     const char* str = dex_file->GetStringData(string_id);
     if (strcmp(name, str) == 0) {
@@ -1451,12 +1451,12 @@
             // Swap the proto parameters and shorties to break the ordering.
             std::swap(const_cast<uint32_t&>(proto1.parameters_off_),
                       const_cast<uint32_t&>(proto2.parameters_off_));
-            std::swap(const_cast<uint32_t&>(proto1.shorty_idx_),
-                      const_cast<uint32_t&>(proto2.shorty_idx_));
+            std::swap(const_cast<dex::StringIndex&>(proto1.shorty_idx_),
+                      const_cast<dex::StringIndex&>(proto2.shorty_idx_));
           } else {
             // Copy the proto parameters and shorty to create duplicate proto id.
             const_cast<uint32_t&>(proto1.parameters_off_) = proto2.parameters_off_;
-            const_cast<uint32_t&>(proto1.shorty_idx_) = proto2.shorty_idx_;
+            const_cast<dex::StringIndex&>(proto1.shorty_idx_) = proto2.shorty_idx_;
           }
         },
         "Out-of-order proto_id arguments");
diff --git a/runtime/dex_instruction.cc b/runtime/dex_instruction.cc
index 751bd51..9902389 100644
--- a/runtime/dex_instruction.cc
+++ b/runtime/dex_instruction.cc
@@ -191,10 +191,11 @@
           if (file != nullptr) {
             uint32_t string_idx = VRegB_21c();
             if (string_idx < file->NumStringIds()) {
-              os << StringPrintf("const-string v%d, %s // string@%d",
-                                 VRegA_21c(),
-                                 PrintableString(file->StringDataByIdx(string_idx)).c_str(),
-                                 string_idx);
+              os << StringPrintf(
+                  "const-string v%d, %s // string@%d",
+                  VRegA_21c(),
+                  PrintableString(file->StringDataByIdx(dex::StringIndex(string_idx))).c_str(),
+                  string_idx);
             } else {
               os << StringPrintf("const-string v%d, <<invalid-string-idx-%d>> // string@%d",
                                  VRegA_21c(),
@@ -333,11 +334,12 @@
         uint32_t string_idx = VRegB_31c();
         if (file != nullptr) {
           if (string_idx < file->NumStringIds()) {
-            os << StringPrintf("%s v%d, %s // string@%d",
-                               opcode,
-                               VRegA_31c(),
-                               PrintableString(file->StringDataByIdx(string_idx)).c_str(),
-                               string_idx);
+            os << StringPrintf(
+                "%s v%d, %s // string@%d",
+                opcode,
+                VRegA_31c(),
+                PrintableString(file->StringDataByIdx(dex::StringIndex(string_idx))).c_str(),
+                string_idx);
           } else {
             os << StringPrintf("%s v%d, <<invalid-string-idx-%d>> // string@%d",
                                opcode,
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index ac52f4e..f6eeffc 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -826,7 +826,7 @@
   return h_class.Get();
 }
 
-inline mirror::String* ResolveStringFromCode(ArtMethod* referrer, uint32_t string_idx) {
+inline mirror::String* ResolveStringFromCode(ArtMethod* referrer, dex::StringIndex string_idx) {
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   return class_linker->ResolveString(string_idx, referrer);
 }
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index d87dc67..7cc136e 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -188,7 +188,7 @@
     REQUIRES_SHARED(Locks::mutator_lock_)
     REQUIRES(!Roles::uninterruptible_);
 
-inline mirror::String* ResolveStringFromCode(ArtMethod* referrer, uint32_t string_idx)
+inline mirror::String* ResolveStringFromCode(ArtMethod* referrer, dex::StringIndex string_idx)
     REQUIRES_SHARED(Locks::mutator_lock_)
     REQUIRES(!Roles::uninterruptible_);
 
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index 397655a..82bb8e5 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -292,7 +292,7 @@
   entry_points_instrumented = instrumented;
 }
 
-void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints) {
+void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints, bool is_marking) {
 #if !defined(__APPLE__) || !defined(__LP64__)
   switch (entry_points_allocator) {
     case gc::kAllocatorTypeDlMalloc: {
@@ -320,7 +320,12 @@
     }
     case gc::kAllocatorTypeRegionTLAB: {
       CHECK(kMovingCollector);
-      SetQuickAllocEntryPoints_region_tlab(qpoints, entry_points_instrumented);
+      if (is_marking) {
+        SetQuickAllocEntryPoints_region_tlab(qpoints, entry_points_instrumented);
+      } else {
+        // Not marking means we need no read barriers and can just use the normal TLAB case.
+        SetQuickAllocEntryPoints_tlab(qpoints, entry_points_instrumented);
+      }
       return;
     }
     default:
@@ -328,6 +333,7 @@
   }
 #else
   UNUSED(qpoints);
+  UNUSED(is_marking);
 #endif
   UNIMPLEMENTED(FATAL);
   UNREACHABLE();
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.h b/runtime/entrypoints/quick/quick_alloc_entrypoints.h
index 14a8e04..bd1e295 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.h
@@ -23,7 +23,9 @@
 
 namespace art {
 
-void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints);
+// is_marking is only used for CC: if the GC is marking, the allocation entrypoints are the
+// marking ones.
+void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints, bool is_marking);
 
 // Runtime shutdown lock is necessary to prevent races in thread initialization. When the thread is
 // starting it doesn't hold the mutator lock until after it has been added to the thread list.
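
With the new parameter, the runtime can reinstall a thread's allocation entrypoints whenever the CC collector starts or stops marking: the region-TLAB (read-barrier) set while marking, the plain TLAB set otherwise. A hedged sketch of a caller on that path; the function name here is hypothetical, not ART's API, and only the declarations shown above are assumed.

    // Sketch only: invoked per thread when the CC collector flips its marking
    // phase. With kAllocatorTypeRegionTLAB configured, is_marking=false now
    // installs the plain TLAB entrypoints, which skip the mark-bit checks.
    void OnMarkingPhaseChanged(QuickEntryPoints* qpoints, bool is_marking) {
      ResetQuickAllocEntryPoints(qpoints, is_marking);
    }
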
diff --git a/runtime/entrypoints/quick/quick_default_init_entrypoints.h b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
index df23f94..78dad94 100644
--- a/runtime/entrypoints/quick/quick_default_init_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
@@ -31,7 +31,7 @@
   jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
 
   // Alloc
-  ResetQuickAllocEntryPoints(qpoints);
+  ResetQuickAllocEntryPoints(qpoints, /* is_marking */ true);
 
   // DexCache
   qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage;
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index b1259e1..5dad43e 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -66,7 +66,7 @@
       // TODO: Change art_quick_resolve_string on MIPS and MIPS64 to kSaveEverything.
       (kRuntimeISA == kMips || kRuntimeISA == kMips64) ? Runtime::kSaveRefsOnly
                                                        : Runtime::kSaveEverything);
-  mirror::String* result = ResolveStringFromCode(caller, string_idx);
+  mirror::String* result = ResolveStringFromCode(caller, dex::StringIndex(string_idx));
   if (LIKELY(result != nullptr)) {
     // For AOT code, we need a write barrier for the class loader that holds
     // the GC roots in the .bss.
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 8353b26..19ee0fb 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -2145,14 +2145,18 @@
       to_ref->SetReadBarrierState(ReadBarrier::GrayState());
     }
 
+    // Do a fence to prevent the field CAS in ConcurrentCopying::Process from possibly reordering
+    // before the object copy.
+    QuasiAtomic::ThreadFenceRelease();
+
     LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));
 
     // Try to atomically write the fwd ptr.
-    bool success = from_ref->CasLockWordWeakSequentiallyConsistent(old_lock_word, new_lock_word);
+    bool success = from_ref->CasLockWordWeakRelaxed(old_lock_word, new_lock_word);
     if (LIKELY(success)) {
       // The CAS succeeded.
-      objects_moved_.FetchAndAddSequentiallyConsistent(1);
-      bytes_moved_.FetchAndAddSequentiallyConsistent(region_space_alloc_size);
+      objects_moved_.FetchAndAddRelaxed(1);
+      bytes_moved_.FetchAndAddRelaxed(region_space_alloc_size);
       if (LIKELY(!fall_back_to_non_moving)) {
         DCHECK(region_space_->IsInToSpace(to_ref));
       } else {
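
The ordering change above is a standard publication pattern: a release fence followed by a relaxed CAS still orders the object copy before the forwarding-address install, after which the statistics counters can safely become relaxed as well. A standalone C++ illustration of the argument, using a toy forwarding slot rather than ART's LockWord:

    #include <atomic>
    #include <cstdint>

    std::atomic<uintptr_t> forwarding_slot{0};  // stands in for the lock word
    uint64_t object_copy;                       // stands in for the copied object

    void Publish(uintptr_t to_ref) {
      object_copy = 0x1234;                     // the "object copy" happens first
      // Release fence: no earlier write may reorder past a later atomic write,
      // so the copy is visible before the forwarding address can be observed.
      std::atomic_thread_fence(std::memory_order_release);
      uintptr_t expected = 0;
      // The CAS itself can now be relaxed; the fence already did the ordering.
      forwarding_slot.compare_exchange_strong(expected, to_ref,
                                              std::memory_order_relaxed);
    }

    bool TryRead(uint64_t* out) {
      // The acquire load pairs with the release fence above.
      if (forwarding_slot.load(std::memory_order_acquire) != 0) {
        *out = object_copy;                     // guaranteed to see the copy
        return true;
      }
      return false;
    }
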
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 673a97e..06ed029 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -413,42 +413,48 @@
     if (UNLIKELY(obj == nullptr || !IsAligned<kPageSize>(obj) ||
                  (kIsDebugBuild && large_object_space != nullptr &&
                      !large_object_space->Contains(obj)))) {
-      LOG(FATAL_WITHOUT_ABORT) << "Tried to mark " << obj << " not contained by any spaces";
+      // Lowest priority logging first:
+      PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
+      MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), true);
+      // Buffer the output in the string stream since it is more important than the stack traces
+      // and we want it logged with FATAL priority. The stack traces are printed from
+      // Runtime::Abort, which is called from LOG(FATAL) but before the abort message.
+      std::ostringstream oss;
+      oss << "Tried to mark " << obj << " not contained by any spaces" << std::endl;
       if (holder_ != nullptr) {
         size_t holder_size = holder_->SizeOf();
         ArtField* field = holder_->FindFieldByOffset(offset_);
-        LOG(FATAL_WITHOUT_ABORT) << "Field info: "
-                            << " holder=" << holder_
-                            << " holder is "
-                            << (mark_sweep_->GetHeap()->IsLiveObjectLocked(holder_)
-                                ? "alive" : "dead")
-                            << " holder_size=" << holder_size
-                            << " holder_type=" << holder_->PrettyTypeOf()
-                            << " offset=" << offset_.Uint32Value()
-                            << " field=" << (field != nullptr ? field->GetName() : "nullptr")
-                            << " field_type="
-                            << (field != nullptr ? field->GetTypeDescriptor() : "")
-                            << " first_ref_field_offset="
-                            << (holder_->IsClass()
-                                ? holder_->AsClass()->GetFirstReferenceStaticFieldOffset(
-                                    kRuntimePointerSize)
-                                : holder_->GetClass()->GetFirstReferenceInstanceFieldOffset())
-                            << " num_of_ref_fields="
-                            << (holder_->IsClass()
-                                ? holder_->AsClass()->NumReferenceStaticFields()
-                                : holder_->GetClass()->NumReferenceInstanceFields());
+        oss << "Field info: "
+            << " holder=" << holder_
+            << " holder is "
+            << (mark_sweep_->GetHeap()->IsLiveObjectLocked(holder_)
+                ? "alive" : "dead")
+            << " holder_size=" << holder_size
+            << " holder_type=" << holder_->PrettyTypeOf()
+            << " offset=" << offset_.Uint32Value()
+            << " field=" << (field != nullptr ? field->GetName() : "nullptr")
+            << " field_type="
+            << (field != nullptr ? field->GetTypeDescriptor() : "")
+            << " first_ref_field_offset="
+            << (holder_->IsClass()
+                ? holder_->AsClass()->GetFirstReferenceStaticFieldOffset(
+                    kRuntimePointerSize)
+                : holder_->GetClass()->GetFirstReferenceInstanceFieldOffset())
+            << " num_of_ref_fields="
+            << (holder_->IsClass()
+                ? holder_->AsClass()->NumReferenceStaticFields()
+                : holder_->GetClass()->NumReferenceInstanceFields())
+            << std::endl;
         // Print the memory content of the holder.
         for (size_t i = 0; i < holder_size / sizeof(uint32_t); ++i) {
           uint32_t* p = reinterpret_cast<uint32_t*>(holder_);
-          LOG(FATAL_WITHOUT_ABORT) << &p[i] << ": " << "holder+" << (i * sizeof(uint32_t)) << " = "
-                              << std::hex << p[i];
+          oss << &p[i] << ": " << "holder+" << (i * sizeof(uint32_t)) << " = " << std::hex << p[i]
+              << std::endl;
         }
       }
-      PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
-      MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), true);
-      LOG(FATAL_WITHOUT_ABORT) << "Attempting see if it's a bad thread root";
-      mark_sweep_->VerifySuspendedThreadRoots();
-      LOG(FATAL) << "Can't mark invalid object";
+      oss << "Attempting see if it's a bad thread root" << std::endl;
+      mark_sweep_->VerifySuspendedThreadRoots(oss);
+      LOG(FATAL) << oss.str();
     }
   }
 
@@ -567,6 +573,8 @@
 
 class MarkSweep::VerifyRootVisitor : public SingleRootVisitor {
  public:
+  explicit VerifyRootVisitor(std::ostream& os) : os_(os) {}
+
   void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
       REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     // See if the root is on any space bitmap.
@@ -574,14 +582,17 @@
     if (heap->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
       space::LargeObjectSpace* large_object_space = heap->GetLargeObjectsSpace();
       if (large_object_space != nullptr && !large_object_space->Contains(root)) {
-        LOG(FATAL_WITHOUT_ABORT) << "Found invalid root: " << root << " " << info;
+        os_ << "Found invalid root: " << root << " " << info << std::endl;
       }
     }
   }
+
+ private:
+  std::ostream& os_;
 };
 
-void MarkSweep::VerifySuspendedThreadRoots() {
-  VerifyRootVisitor visitor;
+void MarkSweep::VerifySuspendedThreadRoots(std::ostream& os) {
+  VerifyRootVisitor visitor(os);
   Runtime::Current()->GetThreadList()->VisitRootsForSuspendedThreads(&visitor);
 }
 
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index a94cb27..02cf462 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -250,7 +250,7 @@
 
   // Verify the roots of the heap and print out information related to any invalid roots.
   // Called in MarkObject, so we may not hold the mutator lock.
-  void VerifySuspendedThreadRoots()
+  void VerifySuspendedThreadRoots(std::ostream& os)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Expand mark stack to 2x its current size.
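
Threading a std::ostream through the visitor means all of the diagnostics arrive in one LOG(FATAL) payload after the lower-priority dumps, instead of as interleaved FATAL_WITHOUT_ABORT lines. A minimal standalone model of that buffering pattern, with stderr standing in for the ART logger:

    #include <iostream>
    #include <sstream>

    // Collect every finding into one buffer, then emit it atomically, mirroring
    // how VerifySuspendedThreadRoots(std::ostream&) feeds the final LOG(FATAL).
    void ReportInvalidObject(const void* obj, bool bad_root_suspected) {
      std::ostringstream oss;
      oss << "Tried to mark " << obj << " not contained by any spaces\n";
      if (bad_root_suspected) {
        oss << "Attempting to see if it's a bad thread root\n";
      }
      std::cerr << oss.str();  // in ART: LOG(FATAL) << oss.str();
    }
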
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 97129e8..54f2210 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -247,7 +247,7 @@
   if (allocator_type != kAllocatorTypeTLAB &&
       allocator_type != kAllocatorTypeRegionTLAB &&
       allocator_type != kAllocatorTypeRosAlloc &&
-      UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size))) {
+      UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, alloc_size, kGrow))) {
     return nullptr;
   }
   mirror::Object* ret;
@@ -267,8 +267,9 @@
       if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
         // If running on valgrind or asan, we should be using the instrumented path.
         size_t max_bytes_tl_bulk_allocated = rosalloc_space_->MaxBytesBulkAllocatedFor(alloc_size);
-        if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type,
-                                                      max_bytes_tl_bulk_allocated))) {
+        if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type,
+                                               max_bytes_tl_bulk_allocated,
+                                               kGrow))) {
           return nullptr;
         }
         ret = rosalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
@@ -277,14 +278,18 @@
         DCHECK(!is_running_on_memory_tool_);
         size_t max_bytes_tl_bulk_allocated =
             rosalloc_space_->MaxBytesBulkAllocatedForNonvirtual(alloc_size);
-        if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type,
-                                                      max_bytes_tl_bulk_allocated))) {
+        if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type,
+                                               max_bytes_tl_bulk_allocated,
+                                               kGrow))) {
           return nullptr;
         }
         if (!kInstrumented) {
           DCHECK(!rosalloc_space_->CanAllocThreadLocal(self, alloc_size));
         }
-        ret = rosalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size,
+        ret = rosalloc_space_->AllocNonvirtual(self,
+                                               alloc_size,
+                                               bytes_allocated,
+                                               usable_size,
                                                bytes_tl_bulk_allocated);
       }
       break;
@@ -292,22 +297,34 @@
     case kAllocatorTypeDlMalloc: {
       if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
         // If running on valgrind, we should be using the instrumented path.
-        ret = dlmalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
+        ret = dlmalloc_space_->Alloc(self,
+                                     alloc_size,
+                                     bytes_allocated,
+                                     usable_size,
                                      bytes_tl_bulk_allocated);
       } else {
         DCHECK(!is_running_on_memory_tool_);
-        ret = dlmalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size,
+        ret = dlmalloc_space_->AllocNonvirtual(self,
+                                               alloc_size,
+                                               bytes_allocated,
+                                               usable_size,
                                                bytes_tl_bulk_allocated);
       }
       break;
     }
     case kAllocatorTypeNonMoving: {
-      ret = non_moving_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
+      ret = non_moving_space_->Alloc(self,
+                                     alloc_size,
+                                     bytes_allocated,
+                                     usable_size,
                                      bytes_tl_bulk_allocated);
       break;
     }
     case kAllocatorTypeLOS: {
-      ret = large_object_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
+      ret = large_object_space_->Alloc(self,
+                                       alloc_size,
+                                       bytes_allocated,
+                                       usable_size,
                                        bytes_tl_bulk_allocated);
       // Note that the bump pointer spaces aren't necessarily next to
       // the other continuous spaces like the non-moving alloc space or
@@ -315,80 +332,38 @@
       DCHECK(ret == nullptr || large_object_space_->Contains(ret));
       break;
     }
-    case kAllocatorTypeTLAB: {
-      DCHECK_ALIGNED(alloc_size, space::BumpPointerSpace::kAlignment);
-      if (UNLIKELY(self->TlabSize() < alloc_size)) {
-        const size_t new_tlab_size = alloc_size + kDefaultTLABSize;
-        if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type, new_tlab_size))) {
-          return nullptr;
-        }
-        // Try allocating a new thread local buffer, if the allocaiton fails the space must be
-        // full so return null.
-        if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size)) {
-          return nullptr;
-        }
-        *bytes_tl_bulk_allocated = new_tlab_size;
-      } else {
-        *bytes_tl_bulk_allocated = 0;
-      }
-      // The allocation can't fail.
-      ret = self->AllocTlab(alloc_size);
-      DCHECK(ret != nullptr);
-      *bytes_allocated = alloc_size;
-      *usable_size = alloc_size;
-      break;
-    }
     case kAllocatorTypeRegion: {
       DCHECK(region_space_ != nullptr);
       alloc_size = RoundUp(alloc_size, space::RegionSpace::kAlignment);
-      ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
+      ret = region_space_->AllocNonvirtual<false>(alloc_size,
+                                                  bytes_allocated,
+                                                  usable_size,
                                                   bytes_tl_bulk_allocated);
       break;
     }
+    case kAllocatorTypeTLAB:
+      FALLTHROUGH_INTENDED;
     case kAllocatorTypeRegionTLAB: {
-      DCHECK(region_space_ != nullptr);
-      DCHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment);
+      DCHECK_ALIGNED(alloc_size, kObjectAlignment);
+      static_assert(space::RegionSpace::kAlignment == space::BumpPointerSpace::kAlignment,
+                    "mismatched alignments");
+      static_assert(kObjectAlignment == space::BumpPointerSpace::kAlignment,
+                    "mismatched alignments");
       if (UNLIKELY(self->TlabSize() < alloc_size)) {
-        if (space::RegionSpace::kRegionSize >= alloc_size) {
-          // Non-large. Check OOME for a tlab.
-          if (LIKELY(!IsOutOfMemoryOnAllocation<kGrow>(allocator_type, space::RegionSpace::kRegionSize))) {
-            // Try to allocate a tlab.
-            if (!region_space_->AllocNewTlab(self)) {
-              // Failed to allocate a tlab. Try non-tlab.
-              ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
-                                                          bytes_tl_bulk_allocated);
-              return ret;
-            }
-            *bytes_tl_bulk_allocated = space::RegionSpace::kRegionSize;
-            // Fall-through.
-          } else {
-            // Check OOME for a non-tlab allocation.
-            if (!IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size)) {
-              ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
-                                                          bytes_tl_bulk_allocated);
-              return ret;
-            } else {
-              // Neither tlab or non-tlab works. Give up.
-              return nullptr;
-            }
-          }
-        } else {
-          // Large. Check OOME.
-          if (LIKELY(!IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size))) {
-            ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
-                                                        bytes_tl_bulk_allocated);
-            return ret;
-          } else {
-            return nullptr;
-          }
-        }
-      } else {
-        *bytes_tl_bulk_allocated = 0;  // Allocated in an existing buffer.
+        // kAllocatorTypeTLAB may be the allocator for region space TLAB if the GC is not marking,
+        // which is why the allocator is not passed down.
+        return AllocWithNewTLAB(self,
+                                alloc_size,
+                                kGrow,
+                                bytes_allocated,
+                                usable_size,
+                                bytes_tl_bulk_allocated);
       }
       // The allocation can't fail.
       ret = self->AllocTlab(alloc_size);
       DCHECK(ret != nullptr);
       *bytes_allocated = alloc_size;
+      *bytes_tl_bulk_allocated = 0;  // Allocated in an existing buffer.
       *usable_size = alloc_size;
       break;
     }
@@ -408,15 +383,16 @@
   return byte_count >= large_object_threshold_ && (c->IsPrimitiveArray() || c->IsStringClass());
 }
 
-template <bool kGrow>
-inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size) {
+inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type,
+                                            size_t alloc_size,
+                                            bool grow) {
   size_t new_footprint = num_bytes_allocated_.LoadSequentiallyConsistent() + alloc_size;
   if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
     if (UNLIKELY(new_footprint > growth_limit_)) {
       return true;
     }
     if (!AllocatorMayHaveConcurrentGC(allocator_type) || !IsGcConcurrent()) {
-      if (!kGrow) {
+      if (!grow) {
         return true;
       }
       // TODO: Grow for allocation is racy, fix it.
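
The hunk above trades the `template <bool kGrow>` form of `IsOutOfMemoryOnAllocation` for a runtime `bool grow` parameter, so call sites that only learn the flag at runtime (such as the new `AllocWithNewTLAB` below) can share a single function. A minimal standalone sketch of the same refactor, with invented member names rather than the real ART fields, and with the GC-concurrency checks omitted:

```cpp
#include <cstddef>

// Illustrative stand-in for the heap; names and limits are invented.
class Heap {
 public:
  // Before: template <bool kGrow> bool IsOutOfMemory(size_t alloc_size);
  // After: the flag is an ordinary parameter, so one symbol serves both
  // compile-time-known and runtime-only callers.
  bool IsOutOfMemory(size_t alloc_size, bool grow) {
    const size_t new_footprint = footprint_ + alloc_size;
    if (new_footprint > max_allowed_) {
      if (new_footprint > growth_limit_) {
        return true;  // Over the hard cap: always OOM.
      }
      if (!grow) {
        return true;  // Caller did not permit growing past the soft cap.
      }
      max_allowed_ = new_footprint;  // Grow the soft cap.
    }
    return false;
  }

 private:
  size_t footprint_ = 0;
  size_t max_allowed_ = 1024;
  size_t growth_limit_ = 4096;
};
```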
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index f0e619d..5c219cc 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1819,7 +1819,7 @@
           break;
         }
         // Try to transition the heap if the allocation failure was due to the space being full.
-        if (!IsOutOfMemoryOnAllocation<false>(allocator, alloc_size)) {
+        if (!IsOutOfMemoryOnAllocation(allocator, alloc_size, /*grow*/ false)) {
           // If we aren't out of memory then the OOM was probably from the non moving space being
           // full. Attempt to disable compaction and turn the main space into a non moving space.
           DisableMovingGc();
@@ -2742,12 +2742,6 @@
     concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
   }
 
-  // It's time to clear all inline caches, in case some classes can be unloaded.
-  if (((gc_type == collector::kGcTypeFull) || (gc_type == collector::kGcTypePartial)) &&
-      (runtime->GetJit() != nullptr)) {
-    runtime->GetJit()->GetCodeCache()->ClearGcRootsInInlineCaches(self);
-  }
-
   CHECK(collector != nullptr)
       << "Could not find garbage collector with collector_type="
       << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
@@ -4225,5 +4219,72 @@
   gc_pause_listener_.StoreRelaxed(nullptr);
 }
 
+mirror::Object* Heap::AllocWithNewTLAB(Thread* self,
+                                       size_t alloc_size,
+                                       bool grow,
+                                       size_t* bytes_allocated,
+                                       size_t* usable_size,
+                                       size_t* bytes_tl_bulk_allocated) {
+  const AllocatorType allocator_type = GetCurrentAllocator();
+  if (allocator_type == kAllocatorTypeTLAB) {
+    DCHECK(bump_pointer_space_ != nullptr);
+    const size_t new_tlab_size = alloc_size + kDefaultTLABSize;
+    if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, new_tlab_size, grow))) {
+      return nullptr;
+    }
+    // Try allocating a new thread local buffer; if the allocation fails, the space must be
+    // full, so return null.
+    if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size)) {
+      return nullptr;
+    }
+    *bytes_tl_bulk_allocated = new_tlab_size;
+  } else {
+    DCHECK(allocator_type == kAllocatorTypeRegionTLAB);
+    DCHECK(region_space_ != nullptr);
+    if (space::RegionSpace::kRegionSize >= alloc_size) {
+      // Non-large. Check OOME for a tlab.
+      if (LIKELY(!IsOutOfMemoryOnAllocation(allocator_type,
+                                            space::RegionSpace::kRegionSize,
+                                            grow))) {
+        // Try to allocate a tlab.
+        if (!region_space_->AllocNewTlab(self)) {
+          // Failed to allocate a tlab. Try non-tlab.
+          return region_space_->AllocNonvirtual<false>(alloc_size,
+                                                       bytes_allocated,
+                                                       usable_size,
+                                                       bytes_tl_bulk_allocated);
+        }
+        *bytes_tl_bulk_allocated = space::RegionSpace::kRegionSize;
+        // Fall-through to using the TLAB below.
+      } else {
+        // Check OOME for a non-tlab allocation.
+        if (!IsOutOfMemoryOnAllocation(allocator_type, alloc_size, grow)) {
+          return region_space_->AllocNonvirtual<false>(alloc_size,
+                                                       bytes_allocated,
+                                                       usable_size,
+                                                       bytes_tl_bulk_allocated);
+        }
+        // Neither tlab nor non-tlab works. Give up.
+        return nullptr;
+      }
+    } else {
+      // Large. Check OOME.
+      if (LIKELY(!IsOutOfMemoryOnAllocation(allocator_type, alloc_size, grow))) {
+        return region_space_->AllocNonvirtual<false>(alloc_size,
+                                                     bytes_allocated,
+                                                     usable_size,
+                                                     bytes_tl_bulk_allocated);
+      }
+      return nullptr;
+    }
+  }
+  // Refilled TLAB, return.
+  mirror::Object* ret = self->AllocTlab(alloc_size);
+  DCHECK(ret != nullptr);
+  *bytes_allocated = alloc_size;
+  *usable_size = alloc_size;
+  return ret;
+}
+
 }  // namespace gc
 }  // namespace art
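
`AllocWithNewTLAB` concentrates the slow path: check OOM for a whole buffer, reserve it, and only then bump-allocate, while the fast path in the allocator switch stays a plain `self->AllocTlab(alloc_size)`. A toy, single-threaded sketch of the bump-pointer TLAB idea, assuming a fixed refill size and malloc-backed buffers (both invented here; old buffers are intentionally leaked in this sketch, where the real spaces are reclaimed by the GC):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdlib>

// Illustrative thread-local allocation buffer: allocation is a pointer bump,
// refill is the slow path that charges a whole new buffer at once.
struct Tlab {
  uint8_t* pos = nullptr;
  uint8_t* end = nullptr;

  size_t Remaining() const { return static_cast<size_t>(end - pos); }

  void* Alloc(size_t bytes) {
    if (Remaining() < bytes) {
      // Slow path: the whole buffer is charged up front, mirroring
      // *bytes_tl_bulk_allocated in the real code.
      const size_t kDefaultTlabSize = 32 * 1024;
      uint8_t* buffer = static_cast<uint8_t*>(std::malloc(bytes + kDefaultTlabSize));
      if (buffer == nullptr) {
        return nullptr;  // The space is full.
      }
      pos = buffer;
      end = buffer + bytes + kDefaultTlabSize;
    }
    void* ret = pos;  // Fast path: cannot fail once the buffer fits the request.
    pos += bytes;
    return ret;
  }
};
```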
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 0c671d2..3a8e29b 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -854,6 +854,10 @@
         allocator_type != kAllocatorTypeRegionTLAB;
   }
   static ALWAYS_INLINE bool AllocatorMayHaveConcurrentGC(AllocatorType allocator_type) {
+    if (kUseReadBarrier) {
+      // Read barrier may have the TLAB allocator but is always concurrent. TODO: clean this up.
+      return true;
+    }
     return
         allocator_type != kAllocatorTypeBumpPointer &&
         allocator_type != kAllocatorTypeTLAB;
@@ -923,11 +927,20 @@
                                               size_t* bytes_tl_bulk_allocated)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  mirror::Object* AllocWithNewTLAB(Thread* self,
+                                   size_t alloc_size,
+                                   bool grow,
+                                   size_t* bytes_allocated,
+                                   size_t* usable_size,
+                                   size_t* bytes_tl_bulk_allocated)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  template <bool kGrow>
-  ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size);
+  ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type,
+                                               size_t alloc_size,
+                                               bool grow);
 
   // Run the finalizers. If timeout is non zero, then we use the VMRuntime version.
   void RunFinalization(JNIEnv* env, uint64_t timeout);
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index d4c322e..870d1ae 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -630,7 +630,7 @@
 }
 
 static void ResetQuickAllocEntryPointsForThread(Thread* thread, void* arg ATTRIBUTE_UNUSED) {
-  thread->ResetQuickAllocEntryPointsForThread();
+  thread->ResetQuickAllocEntryPointsForThread(kUseReadBarrier && thread->GetIsGcMarking());
 }
 
 void Instrumentation::SetEntrypointsInstrumented(bool instrumented) {
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 9c26d24..c9a5b44 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -236,7 +236,7 @@
 // java.lang.String class is initialized.
 static inline ObjPtr<mirror::String> ResolveString(Thread* self,
                                                    ShadowFrame& shadow_frame,
-                                                   uint32_t string_idx)
+                                                   dex::StringIndex string_idx)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   ObjPtr<mirror::Class> java_lang_string_class = mirror::String::GetJavaLangString();
   if (UNLIKELY(!java_lang_string_class->IsInitialized())) {
@@ -251,11 +251,11 @@
   ArtMethod* method = shadow_frame.GetMethod();
   ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
   // MethodVerifier refuses methods with string_idx out of bounds.
-  DCHECK_LT(string_idx % mirror::DexCache::kDexCacheStringCacheSize,
+  DCHECK_LT(string_idx.index_ % mirror::DexCache::kDexCacheStringCacheSize,
             declaring_class->GetDexFile().NumStringIds());
   ObjPtr<mirror::String> string_ptr =
       mirror::StringDexCachePair::Lookup(declaring_class->GetDexCacheStrings(),
-                                         string_idx,
+                                         string_idx.index_,
                                          mirror::DexCache::kDexCacheStringCacheSize).Read();
   if (UNLIKELY(string_ptr == nullptr)) {
     StackHandleScope<1> hs(self);
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 22c0fe0..52eacd5 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -373,7 +373,9 @@
         break;
       case Instruction::CONST_STRING: {
         PREAMBLE();
-        ObjPtr<mirror::String> s = ResolveString(self, shadow_frame,  inst->VRegB_21c());
+        ObjPtr<mirror::String> s = ResolveString(self,
+                                                 shadow_frame,
+                                                 dex::StringIndex(inst->VRegB_21c()));
         if (UNLIKELY(s == nullptr)) {
           HANDLE_PENDING_EXCEPTION();
         } else {
@@ -384,7 +386,9 @@
       }
       case Instruction::CONST_STRING_JUMBO: {
         PREAMBLE();
-        ObjPtr<mirror::String> s = ResolveString(self, shadow_frame,  inst->VRegB_31c());
+        ObjPtr<mirror::String> s = ResolveString(self,
+                                                 shadow_frame,
+                                                 dex::StringIndex(inst->VRegB_31c()));
         if (UNLIKELY(s == nullptr)) {
           HANDLE_PENDING_EXCEPTION();
         } else {
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index fbfed40..c8c1563 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -291,7 +291,7 @@
                                    ShadowFrame* shadow_frame,
                                    Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjPtr<mirror::String> s = ResolveString(self, *shadow_frame, index);
+  ObjPtr<mirror::String> s = ResolveString(self, *shadow_frame, dex::StringIndex(index));
   if (UNLIKELY(s == nullptr)) {
     return true;
   }
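
From here on, the patch threads `dex::StringIndex` through every API that previously took a raw `uint32_t`, so a string id can no longer be confused with a type or method id at compile time. A minimal sketch of such a strong index wrapper, patterned on the `IsValid()`/`Invalid()`/`index_` usage visible in this diff; the actual definition lives in dex_file_types.h and may differ in detail:

```cpp
#include <cstdint>
#include <limits>

namespace dex {

// Strong typedef for a string_ids_ table index: implicit conversion from
// uint32_t is disallowed, so call sites must write dex::StringIndex(raw).
class StringIndex {
 public:
  uint32_t index_;

  constexpr StringIndex() : index_(std::numeric_limits<uint32_t>::max()) {}
  explicit constexpr StringIndex(uint32_t idx) : index_(idx) {}

  bool IsValid() const { return index_ != std::numeric_limits<uint32_t>::max(); }
  static StringIndex Invalid() { return StringIndex(); }

  bool operator==(const StringIndex& other) const { return index_ == other.index_; }
  bool operator!=(const StringIndex& other) const { return index_ != other.index_; }
  bool operator<(const StringIndex& other) const { return index_ < other.index_; }
};

}  // namespace dex
```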
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index fad7d90..5574a11 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -621,8 +621,8 @@
   Thread* const self = Thread::Current();
   self->AssertThreadSuspensionIsAllowable();
   CHECK(pReq != nullptr);
+  CHECK_EQ(threadId, Dbg::GetThreadSelfId()) << "Only the current thread can suspend itself";
   /* send request and possibly suspend ourselves */
-  JDWP::ObjectId thread_self_id = Dbg::GetThreadSelfId();
   ScopedThreadSuspension sts(self, kWaitingForDebuggerSend);
   if (suspend_policy != SP_NONE) {
     AcquireJdwpTokenForEvent(threadId);
@@ -631,7 +631,7 @@
   {
     // Before suspending, we change our state to kSuspended so the debugger sees us as RUNNING.
     ScopedThreadStateChange stsc(self, kSuspended);
-    SuspendByPolicy(suspend_policy, thread_self_id);
+    SuspendByPolicy(suspend_policy, threadId);
   }
 }
 
@@ -658,13 +658,10 @@
 }
 
 void JdwpState::AcquireJdwpTokenForEvent(ObjectId threadId) {
-  CHECK_NE(Thread::Current(), GetDebugThread()) << "Expected event thread";
-  CHECK_NE(debug_thread_id_, threadId) << "Not expected debug thread";
   SetWaitForJdwpToken(threadId);
 }
 
 void JdwpState::ReleaseJdwpTokenForEvent() {
-  CHECK_NE(Thread::Current(), GetDebugThread()) << "Expected event thread";
   ClearWaitForJdwpToken();
 }
 
@@ -685,23 +682,28 @@
   /* this is held for very brief periods; contention is unlikely */
   MutexLock mu(self, jdwp_token_lock_);
 
-  CHECK_NE(jdwp_token_owner_thread_id_, threadId) << "Thread is already holding event thread lock";
+  if (jdwp_token_owner_thread_id_ == threadId) {
+    // Only the debugger thread may already hold the event token. For instance, it may trigger
+    // a CLASS_PREPARE event while processing a command that initializes a class.
+    CHECK_EQ(threadId, debug_thread_id_) << "Non-debugger thread is already holding event token";
+  } else {
+    /*
+     * If another thread is already doing stuff, wait for it.  This can
+     * go to sleep indefinitely.
+     */
 
-  /*
-   * If another thread is already doing stuff, wait for it.  This can
-   * go to sleep indefinitely.
-   */
-  while (jdwp_token_owner_thread_id_ != 0) {
-    VLOG(jdwp) << StringPrintf("event in progress (%#" PRIx64 "), %#" PRIx64 " sleeping",
-                               jdwp_token_owner_thread_id_, threadId);
-    waited = true;
-    jdwp_token_cond_.Wait(self);
-  }
+    while (jdwp_token_owner_thread_id_ != 0) {
+      VLOG(jdwp) << StringPrintf("event in progress (%#" PRIx64 "), %#" PRIx64 " sleeping",
+                                 jdwp_token_owner_thread_id_, threadId);
+      waited = true;
+      jdwp_token_cond_.Wait(self);
+    }
 
-  if (waited || threadId != debug_thread_id_) {
-    VLOG(jdwp) << StringPrintf("event token grabbed (%#" PRIx64 ")", threadId);
+    if (waited || threadId != debug_thread_id_) {
+      VLOG(jdwp) << StringPrintf("event token grabbed (%#" PRIx64 ")", threadId);
+    }
+    jdwp_token_owner_thread_id_ = threadId;
   }
-  jdwp_token_owner_thread_id_ = threadId;
 }
 
 /*
@@ -1224,14 +1226,15 @@
     VLOG(jdwp) << "  suspend_policy=" << suspend_policy;
   }
 
-  if (thread_id == debug_thread_id_) {
+  ObjectId reported_thread_id = thread_id;
+  if (reported_thread_id == debug_thread_id_) {
     /*
      * JDWP says that, for a class prep in the debugger thread, we
      * should set thread to null and if any threads were supposed
      * to be suspended then we suspend all other threads.
      */
     VLOG(jdwp) << "  NOTE: class prepare in debugger thread!";
-    thread_id = 0;
+    reported_thread_id = 0;
     if (suspend_policy == SP_EVENT_THREAD) {
       suspend_policy = SP_ALL;
     }
@@ -1244,7 +1247,7 @@
   for (const JdwpEvent* pEvent : match_list) {
     expandBufAdd1(pReq, pEvent->eventKind);
     expandBufAdd4BE(pReq, pEvent->requestId);
-    expandBufAddObjectId(pReq, thread_id);
+    expandBufAddObjectId(pReq, reported_thread_id);
     expandBufAdd1(pReq, tag);
     expandBufAddRefTypeId(pReq, class_id);
     expandBufAddUtf8String(pReq, signature);
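
The jdwp_event.cc changes above relax `SetWaitForJdwpToken` so the debugger thread may re-request the event token it already holds (for example, a CLASS_PREPARE raised while it services a command), while any other thread still waits its turn. A hedged sketch of a token with one privileged re-entrant owner; all names are invented, and the nesting-depth counter is an assumption of this sketch rather than how the real code resolves re-entry:

```cpp
#include <condition_variable>
#include <cstdint>
#include <cstdlib>
#include <mutex>

// Illustrative event token: at most one owner at a time, except that one
// designated id (the debugger thread in JDWP) may re-acquire what it holds.
// Thread ids are assumed nonzero; 0 means unowned.
class EventToken {
 public:
  explicit EventToken(uint64_t privileged_id) : privileged_id_(privileged_id) {}

  void Acquire(uint64_t thread_id) {
    std::unique_lock<std::mutex> lock(mutex_);
    if (owner_ == thread_id) {
      if (thread_id != privileged_id_) std::abort();  // CHECK-style failure.
      ++depth_;  // Re-entrant acquisition by the privileged owner.
      return;
    }
    cond_.wait(lock, [this] { return owner_ == 0; });
    owner_ = thread_id;
    depth_ = 1;
  }

  void Release() {
    std::lock_guard<std::mutex> lock(mutex_);
    if (--depth_ == 0) {
      owner_ = 0;
      cond_.notify_all();
    }
  }

 private:
  std::mutex mutex_;
  std::condition_variable cond_;
  uint64_t owner_ = 0;
  uint32_t depth_ = 0;
  const uint64_t privileged_id_;
};
```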
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 3531852..2ae989a 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -133,7 +133,7 @@
                            size_t max_capacity,
                            bool garbage_collect_code)
     : lock_("Jit code cache", kJitCodeCacheLock),
-      lock_cond_("Jit code cache variable", lock_),
+      lock_cond_("Jit code cache condition variable", lock_),
       collection_in_progress_(false),
       code_map_(code_map),
       data_map_(data_map),
@@ -152,7 +152,9 @@
       number_of_collections_(0),
       histogram_stack_map_memory_use_("Memory used for stack maps", 16),
       histogram_code_memory_use_("Memory used for compiled code", 16),
-      histogram_profiling_info_memory_use_("Memory used for profiling info", 16) {
+      histogram_profiling_info_memory_use_("Memory used for profiling info", 16),
+      is_weak_access_enabled_(true),
+      inline_cache_cond_("Jit inline cache condition variable", lock_) {
 
   DCHECK_GE(max_capacity, initial_code_capacity + initial_data_capacity);
   code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_end_, false /*locked*/);
@@ -327,6 +329,34 @@
       }
     }
   }
+  // Walk over inline caches to clear entries containing unloaded classes.
+  for (ProfilingInfo* info : profiling_infos_) {
+    for (size_t i = 0; i < info->number_of_inline_caches_; ++i) {
+      InlineCache* cache = &info->cache_[i];
+      for (size_t j = 0; j < InlineCache::kIndividualCacheSize; ++j) {
+        // This does not need a read barrier because this is called by GC.
+        mirror::Class* cls = cache->classes_[j].Read<kWithoutReadBarrier>();
+        if (cls != nullptr) {
+          // Look at the classloader of the class to know if it has been
+          // unloaded.
+          // This does not need a read barrier because this is called by GC.
+          mirror::Object* class_loader =
+              cls->GetClassLoader<kDefaultVerifyFlags, kWithoutReadBarrier>();
+          if (class_loader == nullptr || visitor->IsMarked(class_loader) != nullptr) {
+            // The class loader is live, update the entry if the class has moved.
+            mirror::Class* new_cls = down_cast<mirror::Class*>(visitor->IsMarked(cls));
+            // Note that new_cls can be null for CMS and newly allocated objects.
+            if (new_cls != nullptr && new_cls != cls) {
+              cache->classes_[j] = GcRoot<mirror::Class>(new_cls);
+            }
+          } else {
+            // The class loader is not live, clear the entry.
+            cache->classes_[j] = GcRoot<mirror::Class>(nullptr);
+          }
+        }
+      }
+    }
+  }
 }
 
 void JitCodeCache::FreeCode(const void* code_ptr, ArtMethod* method ATTRIBUTE_UNUSED) {
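
The sweep above treats each cached class as a weak root keyed on its class loader: a live loader means the entry is kept (and forwarded if the class moved), a dead loader means the slot is cleared. A self-contained sketch of the keep/forward/clear decision for one weak slot, with a made-up `IsMarkedVisitor`; the real visitor additionally consults the class loader first, as the hunk shows:

```cpp
// Hypothetical stand-ins for a GC liveness/forwarding query and a cached slot.
struct Object {
  Object* forwarded = nullptr;  // Non-null if the object moved this cycle.
  bool marked = false;
};

struct IsMarkedVisitor {
  // Returns the (possibly moved) address if the object is live, else nullptr.
  Object* IsMarked(Object* obj) const {
    if (obj == nullptr || !obj->marked) return nullptr;
    return obj->forwarded != nullptr ? obj->forwarded : obj;
  }
};

// Sweep one weak slot: keep and forward if live, clear if dead.
void SweepWeakSlot(Object** slot, const IsMarkedVisitor& visitor) {
  Object* old_ref = *slot;
  if (old_ref == nullptr) return;
  Object* live = visitor.IsMarked(old_ref);
  if (live != nullptr) {
    if (live != old_ref) {
      *slot = live;  // The object moved; update the root in place.
    }
  } else {
    *slot = nullptr;  // The referent died; clear the cache entry.
  }
}
```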
@@ -375,11 +405,51 @@
   }
 }
 
-void JitCodeCache::ClearGcRootsInInlineCaches(Thread* self) {
+bool JitCodeCache::IsWeakAccessEnabled(Thread* self) const {
+  return kUseReadBarrier
+      ? self->GetWeakRefAccessEnabled()
+      : is_weak_access_enabled_.LoadSequentiallyConsistent();
+}
+
+void JitCodeCache::WaitUntilInlineCacheAccessible(Thread* self) {
+  if (IsWeakAccessEnabled(self)) {
+    return;
+  }
+  ScopedThreadSuspension sts(self, kWaitingWeakGcRootRead);
   MutexLock mu(self, lock_);
-  for (ProfilingInfo* info : profiling_infos_) {
-    if (!info->IsInUseByCompiler()) {
-      info->ClearGcRootsInInlineCaches();
+  while (!IsWeakAccessEnabled(self)) {
+    inline_cache_cond_.Wait(self);
+  }
+}
+
+void JitCodeCache::BroadcastForInlineCacheAccess() {
+  Thread* self = Thread::Current();
+  MutexLock mu(self, lock_);
+  inline_cache_cond_.Broadcast(self);
+}
+
+void JitCodeCache::AllowInlineCacheAccess() {
+  DCHECK(!kUseReadBarrier);
+  is_weak_access_enabled_.StoreSequentiallyConsistent(true);
+  BroadcastForInlineCacheAccess();
+}
+
+void JitCodeCache::DisallowInlineCacheAccess() {
+  DCHECK(!kUseReadBarrier);
+  is_weak_access_enabled_.StoreSequentiallyConsistent(false);
+}
+
+void JitCodeCache::CopyInlineCacheInto(const InlineCache& ic,
+                                       Handle<mirror::ObjectArray<mirror::Class>> array) {
+  WaitUntilInlineCacheAccessible(Thread::Current());
+  // Note that we don't need to lock `lock_` here; the compiler calling
+  // this method has already ensured the inline cache will not be deleted.
+  for (size_t in_cache = 0, in_array = 0;
+       in_cache < InlineCache::kIndividualCacheSize;
+       ++in_cache) {
+    mirror::Class* object = ic.classes_[in_cache].Read();
+    if (object != nullptr) {
+      array->Set(in_array++, object);
     }
   }
 }
@@ -837,8 +907,6 @@
 
   if (collect_profiling_info) {
     ScopedThreadSuspension sts(self, kSuspended);
-    gc::ScopedGCCriticalSection gcs(
-        self, gc::kGcCauseJitCodeCache, gc::kCollectorTypeJitCodeCache);
     MutexLock mu(self, lock_);
     // Free all profiling infos of methods not compiled nor being compiled.
     auto profiling_kept_end = std::remove_if(profiling_infos_.begin(), profiling_infos_.end(),
@@ -852,10 +920,6 @@
         // code cache collection.
         if (ContainsPc(ptr) &&
             info->GetMethod()->GetProfilingInfo(kRuntimePointerSize) == nullptr) {
-          // We clear the inline caches as classes in it might be stalled.
-          info->ClearGcRootsInInlineCaches();
-          // Do a fence to make sure the clearing is seen before attaching to the method.
-          QuasiAtomic::ThreadFenceRelease();
           info->GetMethod()->SetProfilingInfo(info);
         } else if (info->GetMethod()->GetProfilingInfo(kRuntimePointerSize) != info) {
           // No need for this ProfilingInfo object anymore.
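
The new `is_weak_access_enabled_`/`inline_cache_cond_` pair follows the runtime's existing Disallow/Allow/Broadcast protocol for weak references (compare the intern-table and JNI weak-global hooks added in runtime.cc below): readers that arrive while access is disallowed block on a condition variable until the GC broadcasts. A condensed sketch of that gate under invented names; the real code also defers to Thread::GetWeakRefAccessEnabled when the read barrier is in use, and suspends the thread state while waiting:

```cpp
#include <atomic>
#include <condition_variable>
#include <mutex>

// Illustrative GC gate for weak-reference readers.
class WeakAccessGate {
 public:
  void Disallow() { enabled_.store(false, std::memory_order_seq_cst); }

  void Allow() {
    enabled_.store(true, std::memory_order_seq_cst);
    Broadcast();
  }

  void Broadcast() {
    std::lock_guard<std::mutex> lock(mutex_);
    cond_.notify_all();
  }

  // Called by mutators before reading a weak root (e.g. an inline cache).
  void WaitUntilAccessible() {
    if (enabled_.load(std::memory_order_seq_cst)) return;  // Fast path.
    std::unique_lock<std::mutex> lock(mutex_);
    cond_.wait(lock, [this] { return enabled_.load(std::memory_order_seq_cst); });
  }

 private:
  std::atomic<bool> enabled_{true};
  std::mutex mutex_;
  std::condition_variable cond_;
};
```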
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 40112fe..be2cec5 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -36,6 +36,7 @@
 
 class ArtMethod;
 class LinearAlloc;
+class InlineCache;
 class ProfilingInfo;
 
 namespace jit {
@@ -156,7 +157,9 @@
       REQUIRES(!lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void ClearGcRootsInInlineCaches(Thread* self) REQUIRES(!lock_);
+  void CopyInlineCacheInto(const InlineCache& ic, Handle<mirror::ObjectArray<mirror::Class>> array)
+      REQUIRES(!lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Create a 'ProfileInfo' for 'method'. If 'retry_allocation' is true,
   // will collect and retry if the first allocation is unsuccessful.
@@ -200,6 +203,12 @@
       REQUIRES(!lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // The GC needs to disallow the reading of inline caches when it processes them,
+  // to avoid a class being used while it is being deleted.
+  void AllowInlineCacheAccess() REQUIRES(!lock_);
+  void DisallowInlineCacheAccess() REQUIRES(!lock_);
+  void BroadcastForInlineCacheAccess() REQUIRES(!lock_);
+
  private:
   // Take ownership of maps.
   JitCodeCache(MemMap* code_map,
@@ -275,6 +284,11 @@
   void FreeData(uint8_t* data) REQUIRES(lock_);
   uint8_t* AllocateData(size_t data_size) REQUIRES(lock_);
 
+  bool IsWeakAccessEnabled(Thread* self) const;
+  void WaitUntilInlineCacheAccessible(Thread* self)
+      REQUIRES(!lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   // Lock for guarding allocations, collections, and the method_code_map_.
   Mutex lock_;
   // Condition to wait on during collection.
@@ -347,6 +361,14 @@
   // Histograms for keeping track of profiling info statistics.
   Histogram<uint64_t> histogram_profiling_info_memory_use_ GUARDED_BY(lock_);
 
+  // Whether the GC allows accessing weaks in inline caches. Note that this
+  // is not used by the concurrent collector, which uses
+  // Thread::SetWeakRefAccessEnabled instead.
+  Atomic<bool> is_weak_access_enabled_;
+
+  // Condition to wait on for accessing inline caches.
+  ConditionVariable inline_cache_cond_ GUARDED_BY(lock_);
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
 };
 
diff --git a/runtime/jit/profiling_info.cc b/runtime/jit/profiling_info.cc
index 9ec46f0..405280d 100644
--- a/runtime/jit/profiling_info.cc
+++ b/runtime/jit/profiling_info.cc
@@ -36,15 +36,6 @@
   for (size_t i = 0; i < number_of_inline_caches_; ++i) {
     cache_[i].dex_pc_ = entries[i];
   }
-  if (method->IsCopied()) {
-    // GetHoldingClassOfCopiedMethod is expensive, but creating a profiling info for a copied method
-    // appears to happen very rarely in practice.
-    holding_class_ = GcRoot<mirror::Class>(
-        Runtime::Current()->GetClassLinker()->GetHoldingClassOfCopiedMethod(method));
-  } else {
-    holding_class_ = GcRoot<mirror::Class>(method->GetDeclaringClass());
-  }
-  DCHECK(!holding_class_.IsNull());
 }
 
 bool ProfilingInfo::Create(Thread* self, ArtMethod* method, bool retry_allocation) {
@@ -116,14 +107,6 @@
         --i;
       } else {
         // We successfully set `cls`, just return.
-        // Since the instrumentation is marked from the declaring class we need to mark the card so
-        // that mod-union tables and card rescanning know about the update.
-        // Note that the declaring class is not necessarily the holding class if the method is
-        // copied. We need the card mark to be in the holding class since that is from where we
-        // will visit the profiling info.
-        if (!holding_class_.IsNull()) {
-          Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(holding_class_.Read());
-        }
         return;
       }
     }
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index 1056fac..9902bb5 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -39,46 +39,13 @@
 // Once the classes_ array is full, we consider the INVOKE to be megamorphic.
 class InlineCache {
  public:
-  bool IsMonomorphic() const {
-    DCHECK_GE(kIndividualCacheSize, 2);
-    return !classes_[0].IsNull() && classes_[1].IsNull();
-  }
-
-  bool IsMegamorphic() const {
-    for (size_t i = 0; i < kIndividualCacheSize; ++i) {
-      if (classes_[i].IsNull()) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  mirror::Class* GetMonomorphicType() const REQUIRES_SHARED(Locks::mutator_lock_) {
-    // Note that we cannot ensure the inline cache is actually monomorphic
-    // at this point, as other threads may have updated it.
-    DCHECK(!classes_[0].IsNull());
-    return classes_[0].Read();
-  }
-
-  bool IsUninitialized() const {
-    return classes_[0].IsNull();
-  }
-
-  bool IsPolymorphic() const {
-    DCHECK_GE(kIndividualCacheSize, 3);
-    return !classes_[1].IsNull() && classes_[kIndividualCacheSize - 1].IsNull();
-  }
-
-  mirror::Class* GetTypeAt(size_t i) const REQUIRES_SHARED(Locks::mutator_lock_) {
-    return classes_[i].Read();
-  }
-
   static constexpr uint16_t kIndividualCacheSize = 5;
 
  private:
   uint32_t dex_pc_;
   GcRoot<mirror::Class> classes_[kIndividualCacheSize];
 
+  friend class jit::JitCodeCache;
   friend class ProfilingInfo;
 
   DISALLOW_COPY_AND_ASSIGN(InlineCache);
@@ -102,18 +69,6 @@
       REQUIRES(Roles::uninterruptible_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires.
-  template<typename RootVisitorType>
-  void VisitRoots(RootVisitorType& visitor) NO_THREAD_SAFETY_ANALYSIS {
-    visitor.VisitRootIfNonNull(holding_class_.AddressWithoutBarrier());
-    for (size_t i = 0; i < number_of_inline_caches_; ++i) {
-      InlineCache* cache = &cache_[i];
-      for (size_t j = 0; j < InlineCache::kIndividualCacheSize; ++j) {
-        visitor.VisitRootIfNonNull(cache->classes_[j].AddressWithoutBarrier());
-      }
-    }
-  }
-
   ArtMethod* GetMethod() const {
     return method_;
   }
@@ -175,9 +130,6 @@
   // Method this profiling info is for.
   ArtMethod* const method_;
 
-  // Holding class for the method in case method is a copied method.
-  GcRoot<mirror::Class> holding_class_;
-
   // Whether the ArtMethod is currently being compiled. This flag
   // is implicitly guarded by the JIT code cache lock.
   // TODO: Make the JIT code cache lock global.
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 1ec59b3..6da72e4 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -282,6 +282,7 @@
 #ifndef __LP64__
   UNUSED(low_4gb);
 #endif
+  use_ashmem = use_ashmem && !kIsTargetLinux;
   if (byte_count == 0) {
     return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
   }
@@ -522,6 +523,7 @@
 
 MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_prot,
                            std::string* error_msg, bool use_ashmem) {
+  use_ashmem = use_ashmem && !kIsTargetLinux;
   DCHECK_GE(new_end, Begin());
   DCHECK_LE(new_end, End());
   DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 049ae12..0fea1a5 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -68,7 +68,7 @@
                               bool low_4gb,
                               bool reuse,
                               std::string* error_msg,
-                              bool use_ashmem = !kIsTargetLinux);
+                              bool use_ashmem = true);
 
   // Create placeholder for a region allocated by direct call to mmap.
   // This is useful when we do not have control over the code calling mmap,
@@ -172,7 +172,7 @@
                      const char* tail_name,
                      int tail_prot,
                      std::string* error_msg,
-                     bool use_ashmem = !kIsTargetLinux);
+                     bool use_ashmem = true);
 
   static bool CheckNoGaps(MemMap* begin_map, MemMap* end_map)
       REQUIRES(!Locks::mem_maps_lock_);
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index aa5da2e..5def65e 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -65,8 +65,10 @@
       OFFSET_OF_OBJECT_MEMBER(Class, super_class_));
 }
 
+template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
 inline ClassLoader* Class::GetClassLoader() {
-  return GetFieldObject<ClassLoader>(OFFSET_OF_OBJECT_MEMBER(Class, class_loader_));
+  return GetFieldObject<ClassLoader, kVerifyFlags, kReadBarrierOption>(
+      OFFSET_OF_OBJECT_MEMBER(Class, class_loader_));
 }
 
 template<VerifyObjectFlags kVerifyFlags>
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 792f626..248c941 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -674,6 +674,8 @@
     return MemberOffset(OFFSETOF_MEMBER(Class, super_class_));
   }
 
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   ClassLoader* GetClassLoader() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);
 
   void SetClassLoader(ObjPtr<ClassLoader> new_cl) REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index d903f71..be8815a 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -40,13 +40,14 @@
   return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0, pointer_size);
 }
 
-inline mirror::String* DexCache::GetResolvedString(uint32_t string_idx) {
-  DCHECK_LT(string_idx, GetDexFile()->NumStringIds());
-  return StringDexCachePair::Lookup(GetStrings(), string_idx, NumStrings()).Read();
+inline mirror::String* DexCache::GetResolvedString(dex::StringIndex string_idx) {
+  DCHECK_LT(string_idx.index_, GetDexFile()->NumStringIds());
+  return StringDexCachePair::Lookup(GetStrings(), string_idx.index_, NumStrings()).Read();
 }
 
-inline void DexCache::SetResolvedString(uint32_t string_idx, ObjPtr<mirror::String> resolved) {
-  StringDexCachePair::Assign(GetStrings(), string_idx, resolved.Ptr(), NumStrings());
+inline void DexCache::SetResolvedString(dex::StringIndex string_idx,
+                                        ObjPtr<mirror::String> resolved) {
+  StringDexCachePair::Assign(GetStrings(), string_idx.index_, resolved.Ptr(), NumStrings());
   Runtime* const runtime = Runtime::Current();
   if (UNLIKELY(runtime->IsActiveTransaction())) {
     DCHECK(runtime->IsAotCompiler());
@@ -56,12 +57,12 @@
   runtime->GetHeap()->WriteBarrierEveryFieldOf(this);
 }
 
-inline void DexCache::ClearString(uint32_t string_idx) {
-  const uint32_t slot_idx = string_idx % NumStrings();
+inline void DexCache::ClearString(dex::StringIndex string_idx) {
+  const uint32_t slot_idx = string_idx.index_ % NumStrings();
   DCHECK(Runtime::Current()->IsAotCompiler());
   StringDexCacheType* slot = &GetStrings()[slot_idx];
   // This is racy but should only be called from the transactional interpreter.
-  if (slot->load(std::memory_order_relaxed).index == string_idx) {
+  if (slot->load(std::memory_order_relaxed).index == string_idx.index_) {
     StringDexCachePair cleared(
         nullptr,
         StringDexCachePair::InvalidIndexForSlot(slot_idx));
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 7d82d3a..cc4d01a 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -214,15 +214,15 @@
     return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_method_types_);
   }
 
-  mirror::String* GetResolvedString(uint32_t string_idx) ALWAYS_INLINE
+  mirror::String* GetResolvedString(dex::StringIndex string_idx) ALWAYS_INLINE
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void SetResolvedString(uint32_t string_idx, ObjPtr<mirror::String> resolved) ALWAYS_INLINE
+  void SetResolvedString(dex::StringIndex string_idx, ObjPtr<mirror::String> resolved) ALWAYS_INLINE
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Clear a string for a string_idx, used to undo string intern transactions to make sure
   // the string isn't kept live.
-  void ClearString(uint32_t string_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+  void ClearString(dex::StringIndex string_idx) REQUIRES_SHARED(Locks::mutator_lock_);
 
   Class* GetResolvedType(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
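
`GetResolvedString`/`SetResolvedString` go through `StringDexCachePair::Lookup`/`Assign`, a direct-mapped cache: the slot is `index % NumStrings()`, and each slot stores the index next to the value so a hit can be told apart from a colliding entry. A toy single-threaded version of the scheme (the real pairs are atomic and hold GC roots):

```cpp
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

// Illustrative direct-mapped cache of (index, value) pairs, as used for dex
// cache strings: slot = index % size, and the stored index disambiguates
// entries that collide on the same slot.
class StringPairCache {
 public:
  explicit StringPairCache(size_t size) : slots_(size) {}

  const std::string* Lookup(uint32_t index) const {
    const Slot& slot = slots_[index % slots_.size()];
    return (slot.valid && slot.index == index) ? &slot.value : nullptr;
  }

  void Assign(uint32_t index, std::string value) {
    Slot& slot = slots_[index % slots_.size()];
    slot.index = index;
    slot.value = std::move(value);
    slot.valid = true;  // May evict a colliding entry; that is by design.
  }

 private:
  struct Slot {
    uint32_t index = 0;
    std::string value;
    bool valid = false;
  };
  std::vector<Slot> slots_;
};
```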
 
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 48feb11..3058df4 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -287,7 +287,7 @@
 
 // Based on ClassLinker::ResolveString.
 static void PreloadDexCachesResolveString(
-    Handle<mirror::DexCache> dex_cache, uint32_t string_idx, StringTable& strings)
+    Handle<mirror::DexCache> dex_cache, dex::StringIndex string_idx, StringTable& strings)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   ObjPtr<mirror::String>  string = dex_cache->GetResolvedString(string_idx);
   if (string != nullptr) {
@@ -450,7 +450,7 @@
       continue;
     }
     for (size_t j = 0; j < dex_cache->NumStrings(); j++) {
-      ObjPtr<mirror::String> string = dex_cache->GetResolvedString(j);
+      ObjPtr<mirror::String> string = dex_cache->GetResolvedString(dex::StringIndex(j));
       if (string != nullptr) {
         filled->num_strings++;
       }
@@ -514,7 +514,7 @@
 
     if (kPreloadDexCachesStrings) {
       for (size_t j = 0; j < dex_cache->NumStrings(); j++) {
-        PreloadDexCachesResolveString(dex_cache, j, strings);
+        PreloadDexCachesResolveString(dex_cache, dex::StringIndex(j), strings);
       }
     }
 
diff --git a/runtime/native/java_lang_DexCache.cc b/runtime/native/java_lang_DexCache.cc
index f6de593..f1c350f 100644
--- a/runtime/native/java_lang_DexCache.cc
+++ b/runtime/native/java_lang_DexCache.cc
@@ -61,7 +61,8 @@
   ScopedFastNativeObjectAccess soa(env);
   ObjPtr<mirror::DexCache> dex_cache = soa.Decode<mirror::DexCache>(javaDexCache);
   CHECK_LT(static_cast<size_t>(string_index), dex_cache->GetDexFile()->NumStringIds());
-  return soa.AddLocalReference<jobject>(dex_cache->GetResolvedString(string_index));
+  return soa.AddLocalReference<jobject>(
+      dex_cache->GetResolvedString(dex::StringIndex(string_index)));
 }
 
 static void DexCache_setResolvedType(JNIEnv* env, jobject javaDexCache, jint type_index,
@@ -77,7 +78,7 @@
   ScopedFastNativeObjectAccess soa(env);
   ObjPtr<mirror::DexCache> dex_cache = soa.Decode<mirror::DexCache>(javaDexCache);
   CHECK_LT(static_cast<size_t>(string_index), dex_cache->GetDexFile()->NumStringIds());
-  dex_cache->SetResolvedString(string_index, soa.Decode<mirror::String>(string));
+  dex_cache->SetResolvedString(dex::StringIndex(string_index), soa.Decode<mirror::String>(string));
 }
 
 static JNINativeMethod gMethods[] = {
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 404e5ce..bdf8b0e 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -719,7 +719,7 @@
     dlopen_handle_ = android_dlopen_ext(absolute_path.get(), RTLD_NOW, &extinfo);
 #else
     UNUSED(oat_file_begin);
-    static_assert(!kIsTargetBuild, "host_dlopen_handles_ will leak handles");
+    static_assert(!kIsTargetBuild || kIsTargetLinux, "host_dlopen_handles_ will leak handles");
     MutexLock mu(Thread::Current(), *Locks::host_dlopen_handles_lock_);
     dlopen_handle_ = dlopen(absolute_path.get(), RTLD_NOW);
     if (dlopen_handle_ != nullptr) {
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 8a3bac7..68b956b 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1782,6 +1782,9 @@
   intern_table_->ChangeWeakRootState(gc::kWeakRootStateNoReadsOrWrites);
   java_vm_->DisallowNewWeakGlobals();
   heap_->DisallowNewAllocationRecords();
+  if (GetJit() != nullptr) {
+    GetJit()->GetCodeCache()->DisallowInlineCacheAccess();
+  }
 
   // All other generic system-weak holders.
   for (gc::AbstractSystemWeakHolder* holder : system_weak_holders_) {
@@ -1795,6 +1798,9 @@
   intern_table_->ChangeWeakRootState(gc::kWeakRootStateNormal);  // TODO: Do this in the sweeping.
   java_vm_->AllowNewWeakGlobals();
   heap_->AllowNewAllocationRecords();
+  if (GetJit() != nullptr) {
+    GetJit()->GetCodeCache()->AllowInlineCacheAccess();
+  }
 
   // All other generic system-weak holders.
   for (gc::AbstractSystemWeakHolder* holder : system_weak_holders_) {
@@ -1810,6 +1816,9 @@
   intern_table_->BroadcastForNewInterns();
   java_vm_->BroadcastForNewWeakGlobals();
   heap_->BroadcastForNewAllocationRecords();
+  if (GetJit() != nullptr) {
+    GetJit()->GetCodeCache()->BroadcastForInlineCacheAccess();
+  }
 
   // All other generic system-weak holders.
   for (gc::AbstractSystemWeakHolder* holder : system_weak_holders_) {
@@ -2023,7 +2032,8 @@
   preinitialization_transaction_->RecordWeakStringRemoval(s);
 }
 
-void Runtime::RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, uint32_t string_idx) const {
+void Runtime::RecordResolveString(ObjPtr<mirror::DexCache> dex_cache,
+                                  dex::StringIndex string_idx) const {
   DCHECK(IsAotCompiler());
   DCHECK(IsActiveTransaction());
   preinitialization_transaction_->RecordResolveString(dex_cache, string_idx);
@@ -2183,7 +2193,7 @@
 
 NO_RETURN
 void Runtime::Aborter(const char* abort_message) {
-#ifdef __ANDROID__
+#ifdef ART_TARGET_ANDROID
   android_set_abort_message(abort_message);
 #endif
   Runtime::Abort(abort_message);
diff --git a/runtime/runtime.h b/runtime/runtime.h
index de5a356..4f31887 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -28,6 +28,7 @@
 
 #include "arch/instruction_set.h"
 #include "base/macros.h"
+#include "dex_file_types.h"
 #include "experimental_flags.h"
 #include "gc_root.h"
 #include "instrumentation.h"
@@ -520,7 +521,7 @@
       REQUIRES(Locks::intern_table_lock_);
   void RecordWeakStringRemoval(ObjPtr<mirror::String> s) const
       REQUIRES(Locks::intern_table_lock_);
-  void RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, uint32_t string_idx) const
+  void RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void SetFaultMessage(const std::string& message) REQUIRES(!fault_message_lock_);
diff --git a/runtime/stack.h b/runtime/stack.h
index 992bda5..e879214 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -755,10 +755,6 @@
     return cur_shadow_frame_;
   }
 
-  bool IsCurrentFrameInInterpreter() const {
-    return cur_shadow_frame_ != nullptr;
-  }
-
   HandleScope* GetCurrentHandleScope(size_t pointer_size) const {
     ArtMethod** sp = GetCurrentQuickFrame();
     // Skip ArtMethod*; handle scope comes next;
diff --git a/runtime/string_reference.h b/runtime/string_reference.h
index c75c218..0fc06e6 100644
--- a/runtime/string_reference.h
+++ b/runtime/string_reference.h
@@ -21,20 +21,22 @@
 
 #include "base/logging.h"
 #include "dex_file-inl.h"
+#include "dex_file_types.h"
 #include "utf-inl.h"
 
 namespace art {
 
 // A string is located by its DexFile and the string_ids_ table index into that DexFile.
 struct StringReference {
-  StringReference(const DexFile* file, uint32_t index) : dex_file(file), string_index(index) { }
+  StringReference(const DexFile* file, dex::StringIndex index)
+      : dex_file(file), string_index(index) { }
 
   const char* GetStringData() const {
     return dex_file->GetStringData(dex_file->GetStringId(string_index));
   }
 
   const DexFile* dex_file;
-  uint32_t string_index;
+  dex::StringIndex string_index;
 };
 
 // Compare only the reference and not the string contents.
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 65c8681..1283cf0 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -122,21 +122,26 @@
   CHECK(kUseReadBarrier);
   tls32_.is_gc_marking = is_marking;
   UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, is_marking);
+  ResetQuickAllocEntryPointsForThread(is_marking);
 }
 
 void Thread::InitTlsEntryPoints() {
   // Insert a placeholder so we can easily tell if we call an unimplemented entry point.
   uintptr_t* begin = reinterpret_cast<uintptr_t*>(&tlsPtr_.jni_entrypoints);
-  uintptr_t* end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(&tlsPtr_.quick_entrypoints) +
-      sizeof(tlsPtr_.quick_entrypoints));
+  uintptr_t* end = reinterpret_cast<uintptr_t*>(
+      reinterpret_cast<uint8_t*>(&tlsPtr_.quick_entrypoints) + sizeof(tlsPtr_.quick_entrypoints));
   for (uintptr_t* it = begin; it != end; ++it) {
     *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
   }
   InitEntryPoints(&tlsPtr_.jni_entrypoints, &tlsPtr_.quick_entrypoints);
 }
 
-void Thread::ResetQuickAllocEntryPointsForThread() {
-  ResetQuickAllocEntryPoints(&tlsPtr_.quick_entrypoints);
+void Thread::ResetQuickAllocEntryPointsForThread(bool is_marking) {
+  if (kUseReadBarrier && kRuntimeISA != kX86_64) {
+    // Allocation entrypoint switching is currently only implemented for X86_64.
+    is_marking = true;
+  }
+  ResetQuickAllocEntryPoints(&tlsPtr_.quick_entrypoints, is_marking);
 }
 
 class DeoptimizationContextRecord {
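
With `SetIsGcMarking` now also calling `ResetQuickAllocEntryPointsForThread(is_marking)`, each thread's allocation fast paths can depend on whether the concurrent copying collector is marking; on ISAs other than x86-64 the flag is pinned to true, so the marking-safe variant is always installed. A sketch of phase-dependent entrypoint selection through a function-pointer table; the entrypoints and the table here are stand-ins, not the real QuickEntryPoints layout:

```cpp
#include <cstddef>
#include <cstdlib>

// Hypothetical per-thread allocation entrypoint table; the real
// QuickEntryPoints struct has many more slots.
struct QuickEntryPoints {
  void* (*alloc_object)(size_t bytes);
};

// Stub entrypoints standing in for the assembly fast paths.
void* AllocFast(size_t bytes) { return std::malloc(bytes); }
void* AllocMarking(size_t bytes) { return std::malloc(bytes); }  // Would also cooperate with marking.

// Mirrors ResetQuickAllocEntryPoints(..., is_marking): install the variant
// matching the current GC phase; threads are re-pointed when marking flips.
void ResetAllocEntryPoints(QuickEntryPoints* eps, bool is_marking) {
  eps->alloc_object = is_marking ? AllocMarking : AllocFast;
}
```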
diff --git a/runtime/thread.h b/runtime/thread.h
index 97093a6..35226f2 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -1007,7 +1007,7 @@
     tls32_.state_and_flags.as_atomic_int.FetchAndAndSequentiallyConsistent(-1 ^ flag);
   }
 
-  void ResetQuickAllocEntryPointsForThread();
+  void ResetQuickAllocEntryPointsForThread(bool is_marking);
 
   // Returns the remaining space in the TLAB.
   size_t TlabSize() const;
diff --git a/runtime/transaction.cc b/runtime/transaction.cc
index c5da5d2..2536968 100644
--- a/runtime/transaction.cc
+++ b/runtime/transaction.cc
@@ -167,9 +167,10 @@
   array_log.LogValue(index, value);
 }
 
-void Transaction::RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, uint32_t string_idx) {
+void Transaction::RecordResolveString(ObjPtr<mirror::DexCache> dex_cache,
+                                      dex::StringIndex string_idx) {
   DCHECK(dex_cache != nullptr);
-  DCHECK_LT(string_idx, dex_cache->GetDexFile()->NumStringIds());
+  DCHECK_LT(string_idx.index_, dex_cache->GetDexFile()->NumStringIds());
   MutexLock mu(Thread::Current(), log_lock_);
   resolve_string_logs_.push_back(ResolveStringLog(dex_cache, string_idx));
 }
@@ -510,11 +511,11 @@
 }
 
 Transaction::ResolveStringLog::ResolveStringLog(ObjPtr<mirror::DexCache> dex_cache,
-                                                uint32_t string_idx)
+                                                dex::StringIndex string_idx)
     : dex_cache_(dex_cache),
       string_idx_(string_idx) {
   DCHECK(dex_cache != nullptr);
-  DCHECK_LT(string_idx_, dex_cache->GetDexFile()->NumStringIds());
+  DCHECK_LT(string_idx_.index_, dex_cache->GetDexFile()->NumStringIds());
 }
 
 void Transaction::ResolveStringLog::VisitRoots(RootVisitor* visitor) {
diff --git a/runtime/transaction.h b/runtime/transaction.h
index 2ec2f50..1774657 100644
--- a/runtime/transaction.h
+++ b/runtime/transaction.h
@@ -20,6 +20,7 @@
 #include "base/macros.h"
 #include "base/mutex.h"
 #include "base/value_object.h"
+#include "dex_file_types.h"
 #include "gc_root.h"
 #include "object_callbacks.h"
 #include "offsets.h"
@@ -97,7 +98,7 @@
       REQUIRES(!log_lock_);
 
   // Record resolve string.
-  void RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, uint32_t string_idx)
+  void RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!log_lock_);
 
@@ -197,7 +198,7 @@
 
   class ResolveStringLog : public ValueObject {
    public:
-    ResolveStringLog(ObjPtr<mirror::DexCache> dex_cache, uint32_t string_idx);
+    ResolveStringLog(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx);
 
     void Undo() REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -205,7 +206,7 @@
 
    private:
     GcRoot<mirror::DexCache> dex_cache_;
-    const uint32_t string_idx_;
+    const dex::StringIndex string_idx_;
   };
 
   void LogInternedString(const InternStringLog& log)
diff --git a/runtime/transaction_test.cc b/runtime/transaction_test.cc
index 77c2b76..a43c967 100644
--- a/runtime/transaction_test.cc
+++ b/runtime/transaction_test.cc
@@ -26,8 +26,6 @@
 
 namespace art {
 
-static const size_t kDexNoIndex = DexFile::kDexNoIndex;  // Make copy to prevent linking errors.
-
 class TransactionTest : public CommonRuntimeTest {
  public:
   // Tests failing class initialization due to native call with transaction rollback.
@@ -507,8 +505,8 @@
   static const char* kResolvedString = "ResolvedString";
   const DexFile::StringId* string_id = dex_file->FindStringId(kResolvedString);
   ASSERT_TRUE(string_id != nullptr);
-  uint32_t string_idx = dex_file->GetIndexForStringId(*string_id);
-  ASSERT_NE(string_idx, kDexNoIndex);
+  dex::StringIndex string_idx = dex_file->GetIndexForStringId(*string_id);
+  ASSERT_TRUE(string_idx.IsValid());
   // String should only get resolved by the initializer.
   EXPECT_TRUE(class_linker_->LookupString(*dex_file, string_idx, h_dex_cache) == nullptr);
   EXPECT_TRUE(h_dex_cache->GetResolvedString(string_idx) == nullptr);
diff --git a/runtime/verifier/verifier_deps.cc b/runtime/verifier/verifier_deps.cc
index 01af5ec..f9bff23 100644
--- a/runtime/verifier/verifier_deps.cc
+++ b/runtime/verifier/verifier_deps.cc
@@ -80,8 +80,8 @@
   }
 }
 
-uint32_t VerifierDeps::GetClassDescriptorStringId(const DexFile& dex_file,
-                                                  ObjPtr<mirror::Class> klass) {
+dex::StringIndex VerifierDeps::GetClassDescriptorStringId(const DexFile& dex_file,
+                                                          ObjPtr<mirror::Class> klass) {
   DCHECK(klass != nullptr);
   ObjPtr<mirror::DexCache> dex_cache = klass->GetDexCache();
   // Array and proxy classes do not have a dex cache.
@@ -104,9 +104,9 @@
 }
 
 // Try to find the string descriptor of the class. type_idx is a best guess of a matching string id.
-static uint32_t TryGetClassDescriptorStringId(const DexFile& dex_file,
-                                              dex::TypeIndex type_idx,
-                                              ObjPtr<mirror::Class> klass)
+static dex::StringIndex TryGetClassDescriptorStringId(const DexFile& dex_file,
+                                                      dex::TypeIndex type_idx,
+                                                      ObjPtr<mirror::Class> klass)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   if (!klass->IsArrayClass()) {
     const DexFile::TypeId& type_id = dex_file.GetTypeId(type_idx);
@@ -117,21 +117,21 @@
       return type_id.descriptor_idx_;
     }
   }
-  return DexFile::kDexNoIndex;
+  return dex::StringIndex::Invalid();
 }
 
-uint32_t VerifierDeps::GetMethodDeclaringClassStringId(const DexFile& dex_file,
-                                                       uint32_t dex_method_index,
-                                                       ArtMethod* method) {
+dex::StringIndex VerifierDeps::GetMethodDeclaringClassStringId(const DexFile& dex_file,
+                                                               uint32_t dex_method_index,
+                                                               ArtMethod* method) {
   static_assert(kAccJavaFlagsMask == 0xFFFF, "Unexpected value of a constant");
   if (method == nullptr) {
-    return VerifierDeps::kUnresolvedMarker;
+    return dex::StringIndex(VerifierDeps::kUnresolvedMarker);
   }
-  const uint32_t string_id = TryGetClassDescriptorStringId(
+  const dex::StringIndex string_id = TryGetClassDescriptorStringId(
       dex_file,
       dex_file.GetMethodId(dex_method_index).class_idx_,
       method->GetDeclaringClass());
-  if (string_id != DexFile::kDexNoIndex) {
+  if (string_id.IsValid()) {
     // Got lucky using the original dex file, return based on the input dex file.
     DCHECK_EQ(GetClassDescriptorStringId(dex_file, method->GetDeclaringClass()), string_id);
     return string_id;
@@ -139,18 +139,18 @@
   return GetClassDescriptorStringId(dex_file, method->GetDeclaringClass());
 }
 
-uint32_t VerifierDeps::GetFieldDeclaringClassStringId(const DexFile& dex_file,
-                                                      uint32_t dex_field_idx,
-                                                      ArtField* field) {
+dex::StringIndex VerifierDeps::GetFieldDeclaringClassStringId(const DexFile& dex_file,
+                                                              uint32_t dex_field_idx,
+                                                              ArtField* field) {
   static_assert(kAccJavaFlagsMask == 0xFFFF, "Unexpected value of a constant");
   if (field == nullptr) {
-    return VerifierDeps::kUnresolvedMarker;
+    return dex::StringIndex(VerifierDeps::kUnresolvedMarker);
   }
-  const uint32_t string_id = TryGetClassDescriptorStringId(
+  const dex::StringIndex string_id = TryGetClassDescriptorStringId(
       dex_file,
       dex_file.GetFieldId(dex_field_idx).class_idx_,
       field->GetDeclaringClass());
-  if (string_id != DexFile::kDexNoIndex) {
+  if (string_id.IsValid()) {
     // Got lucky using the original dex file, return based on the input dex file.
     DCHECK_EQ(GetClassDescriptorStringId(dex_file, field->GetDeclaringClass()), string_id);
     return string_id;
@@ -190,7 +190,7 @@
   return false;
 }
 
-uint32_t VerifierDeps::GetIdFromString(const DexFile& dex_file, const std::string& str) {
+dex::StringIndex VerifierDeps::GetIdFromString(const DexFile& dex_file, const std::string& str) {
   const DexFile::StringId* string_id = dex_file.FindStringId(str.c_str());
   if (string_id != nullptr) {
     // String is in the DEX file. Return its ID.
@@ -212,32 +212,33 @@
   {
     ReaderMutexLock mu(Thread::Current(), *Locks::verifier_deps_lock_);
     if (FindExistingStringId(deps->strings_, str, &found_id)) {
-      return num_ids_in_dex + found_id;
+      return dex::StringIndex(num_ids_in_dex + found_id);
     }
   }
   {
     WriterMutexLock mu(Thread::Current(), *Locks::verifier_deps_lock_);
     if (FindExistingStringId(deps->strings_, str, &found_id)) {
-      return num_ids_in_dex + found_id;
+      return dex::StringIndex(num_ids_in_dex + found_id);
     }
     deps->strings_.push_back(str);
-    uint32_t new_id = num_ids_in_dex + deps->strings_.size() - 1;
-    CHECK_GE(new_id, num_ids_in_dex);  // check for overflows
+    dex::StringIndex new_id(num_ids_in_dex + deps->strings_.size() - 1);
+    CHECK_GE(new_id.index_, num_ids_in_dex);  // check for overflows
     DCHECK_EQ(str, singleton->GetStringFromId(dex_file, new_id));
     return new_id;
   }
 }
 
-std::string VerifierDeps::GetStringFromId(const DexFile& dex_file, uint32_t string_id) const {
+std::string VerifierDeps::GetStringFromId(const DexFile& dex_file, dex::StringIndex string_id)
+    const {
   uint32_t num_ids_in_dex = dex_file.NumStringIds();
-  if (string_id < num_ids_in_dex) {
+  if (string_id.index_ < num_ids_in_dex) {
     return std::string(dex_file.StringDataByIdx(string_id));
   } else {
     const DexFileDeps* deps = GetDexFileDeps(dex_file);
     DCHECK(deps != nullptr);
-    string_id -= num_ids_in_dex;
-    CHECK_LT(string_id, deps->strings_.size());
-    return deps->strings_[string_id];
+    string_id.index_ -= num_ids_in_dex;
+    CHECK_LT(string_id.index_, deps->strings_.size());
+    return deps->strings_[string_id.index_];
   }
 }
 
@@ -389,8 +390,8 @@
   }
 
   // Get string IDs for both descriptors and store in the appropriate set.
-  uint32_t destination_id = GetClassDescriptorStringId(dex_file, destination);
-  uint32_t source_id = GetClassDescriptorStringId(dex_file, source);
+  dex::StringIndex destination_id = GetClassDescriptorStringId(dex_file, destination);
+  dex::StringIndex source_id = GetClassDescriptorStringId(dex_file, source);
 
   if (is_assignable) {
     dex_deps->assignable_types_.emplace(TypeAssignability(destination_id, source_id));
@@ -471,6 +472,9 @@
 template<> inline uint32_t Encode<dex::TypeIndex>(dex::TypeIndex in) {
   return in.index_;
 }
+template<> inline uint32_t Encode<dex::StringIndex>(dex::StringIndex in) {
+  return in.index_;
+}
 
 template<typename T> inline T Decode(uint32_t in);
 
@@ -483,6 +487,9 @@
 template<> inline dex::TypeIndex Decode<dex::TypeIndex>(uint32_t in) {
   return dex::TypeIndex(in);
 }
+template<> inline dex::StringIndex Decode<dex::StringIndex>(uint32_t in) {
+  return dex::StringIndex(in);
+}
 
 template<typename T1, typename T2>
 static inline void EncodeTuple(std::vector<uint8_t>* out, const std::tuple<T1, T2>& t) {
@@ -508,7 +515,7 @@
 static inline void DecodeTuple(const uint8_t** in, const uint8_t* end, std::tuple<T1, T2, T3>* t) {
   T1 v1 = Decode<T1>(DecodeUint32WithOverflowCheck(in, end));
   T2 v2 = Decode<T2>(DecodeUint32WithOverflowCheck(in, end));
-  T3 v3 = Decode<T2>(DecodeUint32WithOverflowCheck(in, end));
+  T3 v3 = Decode<T3>(DecodeUint32WithOverflowCheck(in, end));
   *t = std::make_tuple(v1, v2, v3);
 }
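Note: the hunks above also show the shape of VerifierDeps' string-ID space: an index below NumStringIds() resolves directly into the dex file, while a larger index points into the per-DexFileDeps `strings_` side table, which GetIdFromString fills under a double-checked reader/writer lock. A minimal Java sketch of that scheme follows; it is a standalone model for illustration, not ART code, and all names are hypothetical.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.locks.ReadWriteLock;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Standalone model of the two-level ID space: IDs below dexStrings.size()
    // map straight into the dex file's string table; larger IDs index extra
    // strings recorded during verification.
    final class StringIdTable {
      private final List<String> dexStrings;               // fixed, from the dex file
      private final List<String> extraStrings = new ArrayList<>();
      private final ReadWriteLock lock = new ReentrantReadWriteLock();

      StringIdTable(List<String> dexStrings) { this.dexStrings = dexStrings; }

      // Mirrors GetIdFromString: look up under the read lock first, then
      // re-check under the write lock before appending (double-checked).
      int getIdFromString(String s) {
        int inDex = dexStrings.indexOf(s);
        if (inDex >= 0) return inDex;
        lock.readLock().lock();
        try {
          int found = extraStrings.indexOf(s);
          if (found >= 0) return dexStrings.size() + found;
        } finally {
          lock.readLock().unlock();
        }
        lock.writeLock().lock();
        try {
          int found = extraStrings.indexOf(s);
          if (found < 0) {
            extraStrings.add(s);
            found = extraStrings.size() - 1;
          }
          return dexStrings.size() + found;
        } finally {
          lock.writeLock().unlock();
        }
      }

      // Mirrors GetStringFromId: small IDs are dex strings, the rest side-table.
      String getStringFromId(int id) {
        return id < dexStrings.size()
            ? dexStrings.get(id)
            : extraStrings.get(id - dexStrings.size());
      }
    }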
 
diff --git a/runtime/verifier/verifier_deps.h b/runtime/verifier/verifier_deps.h
index a12071b..4b8206f 100644
--- a/runtime/verifier/verifier_deps.h
+++ b/runtime/verifier/verifier_deps.h
@@ -129,41 +129,43 @@
     uint16_t GetAccessFlags() const { return std::get<1>(*this); }
   };
 
-  using FieldResolutionBase = std::tuple<uint32_t, uint16_t, uint32_t>;
+  using FieldResolutionBase = std::tuple<uint32_t, uint16_t, dex::StringIndex>;
   struct FieldResolution : public FieldResolutionBase {
     FieldResolution() = default;
     FieldResolution(const FieldResolution&) = default;
-    FieldResolution(uint32_t field_idx, uint16_t access_flags, uint32_t declaring_class_idx)
+    FieldResolution(uint32_t field_idx, uint16_t access_flags, dex::StringIndex declaring_class_idx)
         : FieldResolutionBase(field_idx, access_flags, declaring_class_idx) {}
 
     bool IsResolved() const { return GetAccessFlags() != kUnresolvedMarker; }
     uint32_t GetDexFieldIndex() const { return std::get<0>(*this); }
     uint16_t GetAccessFlags() const { return std::get<1>(*this); }
-    uint32_t GetDeclaringClassIndex() const { return std::get<2>(*this); }
+    dex::StringIndex GetDeclaringClassIndex() const { return std::get<2>(*this); }
   };
 
-  using MethodResolutionBase = std::tuple<uint32_t, uint16_t, uint32_t>;
+  using MethodResolutionBase = std::tuple<uint32_t, uint16_t, dex::StringIndex>;
   struct MethodResolution : public MethodResolutionBase {
     MethodResolution() = default;
     MethodResolution(const MethodResolution&) = default;
-    MethodResolution(uint32_t method_idx, uint16_t access_flags, uint32_t declaring_class_idx)
+    MethodResolution(uint32_t method_idx,
+                     uint16_t access_flags,
+                     dex::StringIndex declaring_class_idx)
         : MethodResolutionBase(method_idx, access_flags, declaring_class_idx) {}
 
     bool IsResolved() const { return GetAccessFlags() != kUnresolvedMarker; }
     uint32_t GetDexMethodIndex() const { return std::get<0>(*this); }
     uint16_t GetAccessFlags() const { return std::get<1>(*this); }
-    uint32_t GetDeclaringClassIndex() const { return std::get<2>(*this); }
+    dex::StringIndex GetDeclaringClassIndex() const { return std::get<2>(*this); }
   };
 
-  using TypeAssignabilityBase = std::tuple<uint32_t, uint32_t>;
+  using TypeAssignabilityBase = std::tuple<dex::StringIndex, dex::StringIndex>;
   struct TypeAssignability : public TypeAssignabilityBase {
     TypeAssignability() = default;
     TypeAssignability(const TypeAssignability&) = default;
-    TypeAssignability(uint32_t destination_idx, uint32_t source_idx)
+    TypeAssignability(dex::StringIndex destination_idx, dex::StringIndex source_idx)
         : TypeAssignabilityBase(destination_idx, source_idx) {}
 
-    uint32_t GetDestination() const { return std::get<0>(*this); }
-    uint32_t GetSource() const { return std::get<1>(*this); }
+    dex::StringIndex GetDestination() const { return std::get<0>(*this); }
+    dex::StringIndex GetSource() const { return std::get<1>(*this); }
   };
 
   // Data structure representing dependencies collected during verification of
@@ -206,11 +208,11 @@
   // string ID. If not, an ID is assigned to the string and cached in `strings_`
   // of the corresponding DexFileDeps structure (either provided or inferred from
   // `dex_file`).
-  uint32_t GetIdFromString(const DexFile& dex_file, const std::string& str)
+  dex::StringIndex GetIdFromString(const DexFile& dex_file, const std::string& str)
       REQUIRES(!Locks::verifier_deps_lock_);
 
   // Returns the string represented by `id`.
-  std::string GetStringFromId(const DexFile& dex_file, uint32_t string_id) const;
+  std::string GetStringFromId(const DexFile& dex_file, dex::StringIndex string_id) const;
 
   // Returns the bytecode access flags of `element` (bottom 16 bits), or
   // `kUnresolvedMarker` if `element` is null.
@@ -220,17 +222,17 @@
 
   // Returns a string ID of the descriptor of the declaring class of `element`,
   // or `kUnresolvedMarker` if `element` is null.
-  uint32_t GetMethodDeclaringClassStringId(const DexFile& dex_file,
-                                           uint32_t dex_method_idx,
-                                           ArtMethod* method)
+  dex::StringIndex GetMethodDeclaringClassStringId(const DexFile& dex_file,
+                                                   uint32_t dex_method_idx,
+                                                   ArtMethod* method)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  uint32_t GetFieldDeclaringClassStringId(const DexFile& dex_file,
-                                          uint32_t dex_field_idx,
-                                          ArtField* field)
+  dex::StringIndex GetFieldDeclaringClassStringId(const DexFile& dex_file,
+                                                  uint32_t dex_field_idx,
+                                                  ArtField* field)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Returns a string ID of the descriptor of the class.
-  uint32_t GetClassDescriptorStringId(const DexFile& dex_file, ObjPtr<mirror::Class> klass)
+  dex::StringIndex GetClassDescriptorStringId(const DexFile& dex_file, ObjPtr<mirror::Class> klass)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::verifier_deps_lock_);
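Note: the header changes replace raw uint32_t indices with the strongly typed dex::StringIndex throughout FieldResolution, MethodResolution, and TypeAssignability, so distinct index spaces can no longer be mixed silently. A rough Java analogue of such a strong typedef, purely for illustration:

    // Hypothetical analogue of dex::StringIndex: structurally just an int,
    // but a distinct type the compiler will not confuse with other indices.
    final class StringIndex {
      static final int NO_INDEX = -1;  // stands in for DexFile::kDexNoIndex (0xFFFFFFFF)
      final int index;

      StringIndex(int index) { this.index = index; }

      boolean isValid() { return index != NO_INDEX; }

      @Override public boolean equals(Object o) {
        return o instanceof StringIndex && ((StringIndex) o).index == index;
      }

      @Override public int hashCode() { return index; }
    }

A tuple like FieldResolution can then declare its declaring-class component as StringIndex rather than a bare integer, which is exactly the shape of the change above.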
 
diff --git a/test/454-get-vreg/get_vreg_jni.cc b/test/454-get-vreg/get_vreg_jni.cc
index 0360eda..5fc5464 100644
--- a/test/454-get-vreg/get_vreg_jni.cc
+++ b/test/454-get-vreg/get_vreg_jni.cc
@@ -46,12 +46,12 @@
       CHECK_EQ(value, 42u);
 
       bool success = GetVReg(m, 1, kIntVReg, &value);
-      if (!IsCurrentFrameInInterpreter() && GetCurrentOatQuickMethodHeader()->IsOptimized()) {
+      if (!IsShadowFrame() && GetCurrentOatQuickMethodHeader()->IsOptimized()) {
         CHECK(!success);
       }
 
       success = GetVReg(m, 2, kIntVReg, &value);
-      if (!IsCurrentFrameInInterpreter() && GetCurrentOatQuickMethodHeader()->IsOptimized()) {
+      if (!IsShadowFrame() && GetCurrentOatQuickMethodHeader()->IsOptimized()) {
         CHECK(!success);
       }
 
@@ -83,12 +83,12 @@
       CHECK_EQ(value, 42u);
 
       bool success = GetVRegPair(m, 2, kLongLoVReg, kLongHiVReg, &value);
-      if (!IsCurrentFrameInInterpreter() && GetCurrentOatQuickMethodHeader()->IsOptimized()) {
+      if (!IsShadowFrame() && GetCurrentOatQuickMethodHeader()->IsOptimized()) {
         CHECK(!success);
       }
 
       success = GetVRegPair(m, 4, kLongLoVReg, kLongHiVReg, &value);
-      if (!IsCurrentFrameInInterpreter() && GetCurrentOatQuickMethodHeader()->IsOptimized()) {
+      if (!IsShadowFrame() && GetCurrentOatQuickMethodHeader()->IsOptimized()) {
         CHECK(!success);
       }
 
diff --git a/test/457-regs/regs_jni.cc b/test/457-regs/regs_jni.cc
index f62a77d..f867bdf 100644
--- a/test/457-regs/regs_jni.cc
+++ b/test/457-regs/regs_jni.cc
@@ -64,7 +64,7 @@
       CHECK_EQ(value, 1u);
 
       bool success = GetVReg(m, 2, kIntVReg, &value);
-      if (!IsCurrentFrameInInterpreter() && GetCurrentOatQuickMethodHeader()->IsOptimized()) {
+      if (!IsShadowFrame() && GetCurrentOatQuickMethodHeader()->IsOptimized()) {
         CHECK(!success);
       }
 
diff --git a/test/538-checker-embed-constants/src/Main.java b/test/538-checker-embed-constants/src/Main.java
index 6b25747..0329e63 100644
--- a/test/538-checker-embed-constants/src/Main.java
+++ b/test/538-checker-embed-constants/src/Main.java
@@ -105,7 +105,7 @@
   /// CHECK-NOT:            and{{(\.w)?}}
   /// CHECK-NOT:            bic{{(\.w)?}}
   /// CHECK-DAG:            and {{r\d+}}, {{r\d+}}, #0xff
-  /// CHECK-DAG:            movs {{r\d+}}, #0
+  /// CHECK-DAG:            mov{{s?}} {{r\d+}}, #0
   /// CHECK-NOT:            and{{(\.w)?}}
   /// CHECK-NOT:            bic{{(\.w)?}}
 
@@ -115,7 +115,7 @@
 
   /// CHECK-START-ARM: long Main.and511(long) disassembly (after)
   /// CHECK:                mov {{r\d+}}, #511
-  /// CHECK-NEXT:           movs {{r\d+}}, #0
+  /// CHECK-NEXT:           mov{{s?}} {{r\d+}}, #0
   /// CHECK-NOT:            and{{(\.w)?}}
   /// CHECK-NOT:            bic{{(\.w)?}}
   /// CHECK:                and{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
@@ -167,7 +167,7 @@
 
   /// CHECK-START-ARM: long Main.or511(long) disassembly (after)
   /// CHECK:                mov {{r\d+}}, #511
-  /// CHECK-NEXT:           movs {{r\d+}}, #0
+  /// CHECK-NEXT:           mov{{s?}} {{r\d+}}, #0
   /// CHECK-NOT:            orr{{(\.w)?}}
   /// CHECK-NOT:            orn
   /// CHECK:                orr{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
@@ -218,7 +218,7 @@
 
   /// CHECK-START-ARM: long Main.xor511(long) disassembly (after)
   /// CHECK:                mov {{r\d+}}, #511
-  /// CHECK-NEXT:           movs {{r\d+}}, #0
+  /// CHECK-NEXT:           mov{{s?}} {{r\d+}}, #0
   /// CHECK-NOT:            eor{{(\.w)?}}
   /// CHECK:                eor{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
   /// CHECK-NEXT:           eor{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
@@ -242,7 +242,7 @@
 
   // Note: No support for partial long constant embedding.
   /// CHECK-START-ARM: long Main.xor0xfffffff00000000f(long) disassembly (after)
-  /// CHECK-DAG:            movs {{r\d+}}, #15
+  /// CHECK-DAG:            mov{{s?}} {{r\d+}}, #15
   /// CHECK-DAG:            mvn {{r\d+}}, #15
   /// CHECK-NOT:            eor{{(\.w)?}}
   /// CHECK-DAG:            eor{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
@@ -507,7 +507,7 @@
   /// CHECK:     <<Arg:j\d+>>       ParameterValue
   /// CHECK:     <<ConstM1:j\d+>>   LongConstant -1
   /// CHECK:                        Add [<<Arg>>,<<ConstM1>>]
-  /// CHECK-NEXT:                   subs r{{\d+}}, #1
+  /// CHECK-NEXT:                   {{adds|subs}} r{{\d+}}, #{{4294967295|1}}
   /// CHECK-NEXT:                   adc r{{\d+}}, r{{\d+}}, #4294967295
   /// CHECK:                        Sub [<<Arg>>,<<ConstM1>>]
   /// CHECK-NEXT:                   adds r{{\d+}}, #1
diff --git a/test/570-checker-osr/osr.cc b/test/570-checker-osr/osr.cc
index 50e8382..8eca6b2 100644
--- a/test/570-checker-osr/osr.cc
+++ b/test/570-checker-osr/osr.cc
@@ -43,7 +43,7 @@
           Runtime::Current()->GetJit()->GetCodeCache()->LookupOsrMethodHeader(m);
       if (header != nullptr && header == GetCurrentOatQuickMethodHeader()) {
         in_osr_method_ = true;
-      } else if (IsCurrentFrameInInterpreter()) {
+      } else if (IsShadowFrame()) {
         in_interpreter_ = true;
       }
       return false;
diff --git a/test/Android.arm_vixl.mk b/test/Android.arm_vixl.mk
index 21b31b4..72616a1 100644
--- a/test/Android.arm_vixl.mk
+++ b/test/Android.arm_vixl.mk
@@ -16,36 +16,9 @@
 
 # Known broken tests for the ARM VIXL backend.
 TEST_ART_BROKEN_OPTIMIZING_ARM_VIXL_RUN_TESTS := \
-  003-omnibus-opcodes \
-  020-string \
-  021-string2 \
-  042-new-instance \
-  044-proxy \
-  080-oom-throw \
-  082-inline-execute \
-  096-array-copy-concurrent-gc \
-  099-vmdebug \
-  100-reflect2 \
   103-string-append \
-  114-ParallelGC \
-  122-npe \
-  129-ThreadGetId \
   137-cfi \
-  144-static-field-sigquit \
-  412-new-array \
-  439-npe \
-  450-checker-types \
   488-checker-inline-recursive-calls \
-  515-dce-dominator \
-  520-equivalent-phi \
-  525-checker-arrays-fields1 \
-  525-checker-arrays-fields2 \
-  527-checker-array-access-split \
-  538-checker-embed-constants \
   552-checker-sharpening \
   562-checker-no-intermediate \
-  570-checker-osr \
   602-deoptimizeable \
-  700-LoadArgRegs \
-  800-smali \
-
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 554f66d..96b984d 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -608,10 +608,7 @@
 TEST_ART_BROKEN_INTERPRETER_READ_BARRIER_RUN_TESTS :=
 
 # Tests that should fail in the read barrier configuration with the Optimizing compiler (AOT).
-# 484: Baker's fast path based read barrier compiler instrumentation generates code containing
-#      more parallel moves on x86, thus some Checker assertions may fail.
-TEST_ART_BROKEN_OPTIMIZING_READ_BARRIER_RUN_TESTS := \
-  484-checker-register-hints
+TEST_ART_BROKEN_OPTIMIZING_READ_BARRIER_RUN_TESTS :=
 
 # Tests that should fail in the read barrier configuration with JIT (Optimizing compiler).
 TEST_ART_BROKEN_JIT_READ_BARRIER_RUN_TESTS :=
diff --git a/tools/ahat/Android.mk b/tools/ahat/Android.mk
index 27c2054..493eafb 100644
--- a/tools/ahat/Android.mk
+++ b/tools/ahat/Android.mk
@@ -48,7 +48,7 @@
 include $(CLEAR_VARS)
 LOCAL_SRC_FILES := $(call all-java-files-under, test)
 LOCAL_JAR_MANIFEST := test/manifest.txt
-LOCAL_STATIC_JAVA_LIBRARIES := ahat junit
+LOCAL_STATIC_JAVA_LIBRARIES := ahat junit-host
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MODULE_TAGS := tests
 LOCAL_MODULE := ahat-tests
diff --git a/tools/art b/tools/art
index 1394a46..91d6e27 100644
--- a/tools/art
+++ b/tools/art
@@ -30,8 +30,9 @@
 }
 
 function find_libdir() {
+  # Get the actual file; $DALVIKVM may be a symbolic link.
   # Use realpath instead of readlink because Android does not have a readlink.
-  if [ "$(realpath "$ANDROID_ROOT/bin/$DALVIKVM")" = "$(realpath "$ANDROID_ROOT/bin/dalvikvm64")" ]; then
+  if [[ "$(realpath "$ANDROID_ROOT/bin/$DALVIKVM")" == *dalvikvm64 ]]; then
     echo "lib64"
   else
     echo "lib"
diff --git a/tools/dexfuzz/README b/tools/dexfuzz/README
index a0658ec..c1cdf1e 100644
--- a/tools/dexfuzz/README
+++ b/tools/dexfuzz/README
@@ -4,7 +4,7 @@
 DexFuzz is primarily a tool for fuzzing DEX files. Fuzzing is the introduction of
 subtle changes ("mutations") to a file to produce a new test case. These test cases
 can be used to test the various modes of execution available to ART (Interpreter,
-Quick compiler, Optimizing compiler) to check for bugs in these modes of execution.
+Optimizing compiler) to check for bugs in these modes of execution.
 This is done by differential testing - each test file is executed with each mode of
 execution, and any differences between the resulting outputs may be an indication of
 a bug in one of the modes.
@@ -53,17 +53,16 @@
 
 And also at least two of the following backends:
   --interpreter
-  --quick
   --optimizing
 
 Note that if you wanted to test both ARM and ARM64 on an ARM64 device, you can use
 --allarm. Also in this case only one backend is needed, e.g., if you wanted to test
-ARM Quick Backend vs. ARM64 Quick Backend.
+ARM Optimizing Backend vs. ARM64 Optimizing Backend.
 
 Some legal examples:
-  --arm --quick --optimizing
-  --x86 --quick --optimizing --interpreter
-  --allarm --quick
+  --arm --optimizing --interpreter
+  --x86 --optimizing --interpreter
+  --allarm --optimizing
 
 Add in --device=<device name, e.g. device:generic> if you want to specify a device.
 Add in --execute-dir=<dir on device> if you want to specify an execution directory.
@@ -98,7 +97,6 @@
              those occurrences.
 Timed Out  - mutated files that timed out for one or more backends.
              Current timeouts are:
-               Quick - 5 seconds
                Optimizing - 5 seconds
                Interpreter - 30 seconds
               (use --short-timeouts to set all backends to 2 seconds.)
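Note: the README's description of differential testing boils down to running the same mutated DEX under every selected backend and flagging any disagreement between outputs. A minimal Java sketch of that comparison loop; Backend here is a stand-in interface, not a dexfuzz type.

    import java.util.LinkedHashMap;
    import java.util.Map;

    final class DiffTest {
      interface Backend {
        String name();
        String run(String testFile) throws Exception;
      }

      // Returns true when every backend produced the same output; reports the
      // first divergence otherwise.
      static boolean agree(Iterable<Backend> backends, String testFile) throws Exception {
        Map<String, String> outputs = new LinkedHashMap<>();
        for (Backend b : backends) {
          outputs.put(b.name(), b.run(testFile));
        }
        String reference = null;
        for (Map.Entry<String, String> e : outputs.entrySet()) {
          if (reference == null) {
            reference = e.getValue();
          } else if (!reference.equals(e.getValue())) {
            System.err.println("Divergence on " + testFile + " at " + e.getKey());
            return false;
          }
        }
        return true;
      }
    }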
diff --git a/tools/dexfuzz/src/dexfuzz/Options.java b/tools/dexfuzz/src/dexfuzz/Options.java
index b442b22..99e03e8 100644
--- a/tools/dexfuzz/src/dexfuzz/Options.java
+++ b/tools/dexfuzz/src/dexfuzz/Options.java
@@ -51,6 +51,7 @@
   public static boolean usingSpecificDevice = false;
   public static int repeat = 1;
   public static String executeDirectory = "/data/art-test";
+  public static String androidRoot = "";
   public static String dumpMutationsFile = "mutations.dump";
   public static String loadMutationsFile = "mutations.dump";
   public static String reportLogFile = "report.log";
@@ -61,7 +62,6 @@
   public static boolean executeOnHost;
   public static boolean noBootImage;
   public static boolean useInterpreter;
-  public static boolean useQuick;
   public static boolean useOptimizing;
   public static boolean useArchArm;
   public static boolean useArchArm64;
@@ -96,12 +96,13 @@
     Log.always("                           the argument given to adb -s. Default execution mode.");
     Log.always("    --execute-dir=<dir>  : Push tests to this directory to execute them.");
     Log.always("                           (Default: /data/art-test)");
+    Log.always("    --android-root=<dir> : Set path where dalvikvm should look for binaries.");
+    Log.always("                           Use this when pushing binaries to a custom location.");
     Log.always("    --no-boot-image      : Use this flag when boot.art is not available.");
     Log.always("    --skip-host-verify   : When executing, skip host-verification stage");
     Log.always("    --execute-class=<c>  : When executing, execute this class (default: Main)");
     Log.always("");
     Log.always("    --interpreter        : Include the Interpreter in comparisons");
-    Log.always("    --quick              : Include the Quick Compiler in comparisons");
     Log.always("    --optimizing         : Include the Optimizing Compiler in comparisons");
     Log.always("");
     Log.always("    --arm                : Include ARM backends in comparisons");
@@ -160,8 +161,6 @@
       skipHostVerify = true;
     } else if (flag.equals("interpreter")) {
       useInterpreter = true;
-    } else if (flag.equals("quick")) {
-      useQuick = true;
     } else if (flag.equals("optimizing")) {
       useOptimizing = true;
     } else if (flag.equals("arm")) {
@@ -261,6 +260,8 @@
       usingSpecificDevice = true;
     } else if (key.equals("execute-dir")) {
       executeDirectory = value;
+    } else if (key.equals("android-root")) {
+      androidRoot = value;
     } else {
       Log.error("Unrecognised key: --" + key);
       usage();
@@ -423,18 +424,15 @@
       if (useInterpreter) {
         backends++;
       }
-      if (useQuick) {
-        backends++;
-      }
       if (useOptimizing) {
         backends++;
       }
       if (useArchArm && useArchArm64) {
-        // Could just be comparing quick-ARM versus quick-ARM64?
+        // Could just be comparing optimizing-ARM versus optimizing-ARM64?
         backends++;
       }
       if (backends < 2) {
-        Log.error("Not enough backends specified! Try --quick --interpreter!");
+        Log.error("Not enough backends specified! Try --optimizing --interpreter!");
         return false;
       }
     }
diff --git a/tools/dexfuzz/src/dexfuzz/executors/Arm64OptimizingBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/Arm64OptimizingBackendExecutor.java
index 72e36e8..84ed4c4 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/Arm64OptimizingBackendExecutor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/Arm64OptimizingBackendExecutor.java
@@ -29,6 +29,9 @@
   protected String constructCommand(String programName) {
     StringBuilder commandBuilder = new StringBuilder();
     commandBuilder.append("dalvikvm64 -Xcompiler-option --compiler-backend=Optimizing ");
+    // The -Xno-dex-file-fallback option ensures that the execution does not default to
+    // the interpreter if compilation fails.
+    commandBuilder.append("-Xno-dex-file-fallback ");
     if (device.noBootImageAvailable()) {
       commandBuilder.append("-Ximage:/data/art-test/core.art -Xnorelocate ");
     }
diff --git a/tools/dexfuzz/src/dexfuzz/executors/Arm64QuickBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/Arm64QuickBackendExecutor.java
deleted file mode 100644
index d9228ed..0000000
--- a/tools/dexfuzz/src/dexfuzz/executors/Arm64QuickBackendExecutor.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package dexfuzz.executors;
-
-import dexfuzz.listeners.BaseListener;
-
-public class Arm64QuickBackendExecutor extends Executor {
-
-  public Arm64QuickBackendExecutor(BaseListener listener, Device device) {
-    super("ARM64 Quick Backend", 5, listener, Architecture.ARM64, device,
-        /*needsCleanCodeCache*/ true, /*isBisectable*/ false);
-  }
-
-  @Override
-  protected String constructCommand(String programName) {
-    StringBuilder commandBuilder = new StringBuilder();
-    commandBuilder.append("dalvikvm64 -Xcompiler-option --compiler-backend=Quick ");
-    if (device.noBootImageAvailable()) {
-      commandBuilder.append("-Ximage:/data/art-test/core.art -Xnorelocate ");
-    }
-    commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
-    commandBuilder.append(executeClass);
-    return commandBuilder.toString();
-  }
-}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/ArmOptimizingBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/ArmOptimizingBackendExecutor.java
index ded8cf9..26a5eea 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/ArmOptimizingBackendExecutor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/ArmOptimizingBackendExecutor.java
@@ -29,6 +29,9 @@
   protected String constructCommand(String programName) {
     StringBuilder commandBuilder = new StringBuilder();
     commandBuilder.append("dalvikvm32 -Xcompiler-option --compiler-backend=Optimizing ");
+    // The -Xno-dex-file-fallback option ensures that the execution does not default to
+    // the interpreter if compilation fails.
+    commandBuilder.append("-Xno-dex-file-fallback ");
     if (device.noBootImageAvailable()) {
       commandBuilder.append("-Ximage:/data/art-test/core.art -Xnorelocate ");
     }
diff --git a/tools/dexfuzz/src/dexfuzz/executors/ArmQuickBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/ArmQuickBackendExecutor.java
deleted file mode 100644
index 0eb35f7..0000000
--- a/tools/dexfuzz/src/dexfuzz/executors/ArmQuickBackendExecutor.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package dexfuzz.executors;
-
-import dexfuzz.listeners.BaseListener;
-
-public class ArmQuickBackendExecutor extends Executor {
-
-  public ArmQuickBackendExecutor(BaseListener listener, Device device) {
-    super("ARM Quick Backend", 5, listener, Architecture.ARM, device,
-        /*needsCleanCodeCache*/ true, /*isBisectable*/ false);
-  }
-
-  @Override
-  protected String constructCommand(String programName) {
-    StringBuilder commandBuilder = new StringBuilder();
-    commandBuilder.append("dalvikvm32 -Xcompiler-option --compiler-backend=Quick ");
-    if (device.noBootImageAvailable()) {
-      commandBuilder.append("-Ximage:/data/art-test/core.art -Xnorelocate ");
-    }
-    commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
-    commandBuilder.append(executeClass);
-    return commandBuilder.toString();
-  }
-}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/Executor.java b/tools/dexfuzz/src/dexfuzz/executors/Executor.java
index c62a3ad..2bcf3a1 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/Executor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/Executor.java
@@ -177,7 +177,15 @@
    * Executes runtime.
    */
   public void execute(String programName) {
-    executionResult = executeCommandWithTimeout(constructCommand(programName), true);
+    String command = "";
+    String androidRoot = Options.androidRoot.trim();
+    if (androidRoot.length() != 0) {
+      command = "PATH=" + androidRoot + "/bin ";
+      command += "ANDROID_ROOT=" + androidRoot + " ";
+      command += "LD_LIBRARY_PATH="+ androidRoot + "/lib:" + androidRoot + "/lib64 ";
+    }
+    command += constructCommand(programName);
+    executionResult = executeCommandWithTimeout(command, true);
   }
 
   /**
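Note: the execute() change above prepends PATH, ANDROID_ROOT, and LD_LIBRARY_PATH assignments to the command string, which relies on the command ultimately passing through a shell. Where no shell is involved, the same override could be expressed through ProcessBuilder's environment map; a sketch under that assumption, not the dexfuzz code path:

    import java.io.IOException;

    final class Launch {
      // Set the environment on the child process directly instead of
      // prepending VAR=value tokens to a shell command line.
      static Process withAndroidRoot(String androidRoot, String... command)
          throws IOException {
        ProcessBuilder pb = new ProcessBuilder(command);
        String root = androidRoot == null ? "" : androidRoot.trim();
        if (!root.isEmpty()) {
          pb.environment().put("PATH", root + "/bin");
          pb.environment().put("ANDROID_ROOT", root);
          pb.environment().put("LD_LIBRARY_PATH", root + "/lib:" + root + "/lib64");
        }
        pb.redirectErrorStream(true);  // merge stderr into stdout
        return pb.start();
      }
    }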
diff --git a/tools/dexfuzz/src/dexfuzz/executors/Mips64OptimizingBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/Mips64OptimizingBackendExecutor.java
index 72d43e7..883ff2a 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/Mips64OptimizingBackendExecutor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/Mips64OptimizingBackendExecutor.java
@@ -29,6 +29,9 @@
   protected String constructCommand(String programName) {
     StringBuilder commandBuilder = new StringBuilder();
     commandBuilder.append("dalvikvm64 -Xcompiler-option --compiler-backend=Optimizing ");
+    // The -Xno-dex-file-fallback option ensures that the execution does not default to
+    // the interpreter if compilation fails.
+    commandBuilder.append("-Xno-dex-file-fallback ");
     commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
     commandBuilder.append(executeClass);
     return commandBuilder.toString();
diff --git a/tools/dexfuzz/src/dexfuzz/executors/Mips64QuickBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/Mips64QuickBackendExecutor.java
deleted file mode 100644
index e7e5ff6..0000000
--- a/tools/dexfuzz/src/dexfuzz/executors/Mips64QuickBackendExecutor.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package dexfuzz.executors;
-
-import dexfuzz.listeners.BaseListener;
-
-public class Mips64QuickBackendExecutor extends Executor {
-
-  public Mips64QuickBackendExecutor(BaseListener listener, Device device) {
-    super("MIPS64 Quick Backend", 5, listener, Architecture.MIPS64, device,
-        /*needsCleanCodeCache*/ true, /*isBisectable*/ false);
-  }
-
-  @Override
-  protected String constructCommand(String programName) {
-    StringBuilder commandBuilder = new StringBuilder();
-    commandBuilder.append("dalvikvm64 -Xcompiler-option --compiler-backend=Quick ");
-    commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
-    commandBuilder.append(executeClass);
-    return commandBuilder.toString();
-  }
-}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/MipsOptimizingBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/MipsOptimizingBackendExecutor.java
index 63f6858..b7babdc 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/MipsOptimizingBackendExecutor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/MipsOptimizingBackendExecutor.java
@@ -29,6 +29,9 @@
   protected String constructCommand(String programName) {
     StringBuilder commandBuilder = new StringBuilder();
     commandBuilder.append("dalvikvm32 -Xcompiler-option --compiler-backend=Optimizing ");
+    // The -Xno-dex-file-fallback option ensures that the execution does not default to
+    // the interpreter if compilation fails.
+    commandBuilder.append("-Xno-dex-file-fallback ");
     commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
     commandBuilder.append(executeClass);
     return commandBuilder.toString();
diff --git a/tools/dexfuzz/src/dexfuzz/executors/MipsQuickBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/MipsQuickBackendExecutor.java
deleted file mode 100644
index b262090..0000000
--- a/tools/dexfuzz/src/dexfuzz/executors/MipsQuickBackendExecutor.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package dexfuzz.executors;
-
-import dexfuzz.listeners.BaseListener;
-
-public class MipsQuickBackendExecutor extends Executor {
-
-  public MipsQuickBackendExecutor(BaseListener listener, Device device) {
-    super("MIPS Quick Backend", 5, listener, Architecture.MIPS, device,
-        /*needsCleanCodeCache*/ true, /*isBisectable*/ false);
-  }
-
-  @Override
-  protected String constructCommand(String programName) {
-    StringBuilder commandBuilder = new StringBuilder();
-    commandBuilder.append("dalvikvm32 -Xcompiler-option --compiler-backend=Quick ");
-    commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
-    commandBuilder.append(executeClass);
-    return commandBuilder.toString();
-  }
-}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/X86OptimizingBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/X86OptimizingBackendExecutor.java
index 5908a8b..1d62051 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/X86OptimizingBackendExecutor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/X86OptimizingBackendExecutor.java
@@ -30,6 +30,9 @@
   protected String constructCommand(String programName) {
     StringBuilder commandBuilder = new StringBuilder();
     commandBuilder.append("dalvikvm32 -Xcompiler-option --compiler-backend=Optimizing ");
+    // The -Xno-dex-file-fallback option ensures that the execution does not default to
+    // the interpreter if compilation fails.
+    commandBuilder.append("-Xno-dex-file-fallback ");
     if (Options.executeOnHost) {
       commandBuilder.append(device.getHostExecutionFlags()).append(" ");
     }
diff --git a/tools/dexfuzz/src/dexfuzz/executors/X86QuickBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/X86QuickBackendExecutor.java
deleted file mode 100644
index 9e8039d..0000000
--- a/tools/dexfuzz/src/dexfuzz/executors/X86QuickBackendExecutor.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package dexfuzz.executors;
-
-import dexfuzz.Options;
-import dexfuzz.listeners.BaseListener;
-
-public class X86QuickBackendExecutor extends Executor {
-
-  public X86QuickBackendExecutor(BaseListener listener, Device device) {
-    super("x86 Quick Backend", 5, listener, Architecture.X86, device,
-        /*needsCleanCodeCache*/ true, /*isBisectable*/ false);
-  }
-
-  @Override
-  protected String constructCommand(String programName) {
-    StringBuilder commandBuilder = new StringBuilder();
-    commandBuilder.append("dalvikvm32 -Xcompiler-option --compiler-backend=Quick ");
-    if (Options.executeOnHost) {
-      commandBuilder.append(device.getHostExecutionFlags()).append(" ");
-    }
-    commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
-    commandBuilder.append(executeClass);
-    return commandBuilder.toString();
-  }
-}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/X86_64OptimizingBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/X86_64OptimizingBackendExecutor.java
index 28ff1a5..ad44259 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/X86_64OptimizingBackendExecutor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/X86_64OptimizingBackendExecutor.java
@@ -29,6 +29,9 @@
   protected String constructCommand(String programName) {
     StringBuilder commandBuilder = new StringBuilder();
     commandBuilder.append("dalvikvm64 -Xcompiler-option --compiler-backend=Optimizing ");
+    // The -Xno-dex-file-fallback option ensures that the execution does not default to
+    // the interpreter if compilation fails.
+    commandBuilder.append("-Xno-dex-file-fallback ");
     commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
     commandBuilder.append(executeClass);
     return commandBuilder.toString();
diff --git a/tools/dexfuzz/src/dexfuzz/executors/X86_64QuickBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/X86_64QuickBackendExecutor.java
deleted file mode 100644
index 22cafe2..0000000
--- a/tools/dexfuzz/src/dexfuzz/executors/X86_64QuickBackendExecutor.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package dexfuzz.executors;
-
-import dexfuzz.listeners.BaseListener;
-
-public class X86_64QuickBackendExecutor extends Executor {
-
-  public X86_64QuickBackendExecutor(BaseListener listener, Device device) {
-    super("x86_64 Quick Backend", 5, listener, Architecture.X86_64, device,
-        /*needsCleanCodeCache*/ true, /*isBisectable*/ false);
-  }
-
-  @Override
-  protected String constructCommand(String programName) {
-    StringBuilder commandBuilder = new StringBuilder();
-    commandBuilder.append("dalvikvm64 -Xcompiler-option --compiler-backend=Quick ");
-    commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
-    commandBuilder.append(executeClass);
-    return commandBuilder.toString();
-  }
-}
diff --git a/tools/dexfuzz/src/dexfuzz/fuzzers/Fuzzer.java b/tools/dexfuzz/src/dexfuzz/fuzzers/Fuzzer.java
index bc39d79..1797d90 100644
--- a/tools/dexfuzz/src/dexfuzz/fuzzers/Fuzzer.java
+++ b/tools/dexfuzz/src/dexfuzz/fuzzers/Fuzzer.java
@@ -22,24 +22,18 @@
 import dexfuzz.executors.Architecture;
 import dexfuzz.executors.Arm64InterpreterExecutor;
 import dexfuzz.executors.Arm64OptimizingBackendExecutor;
-import dexfuzz.executors.Arm64QuickBackendExecutor;
 import dexfuzz.executors.ArmInterpreterExecutor;
 import dexfuzz.executors.ArmOptimizingBackendExecutor;
-import dexfuzz.executors.ArmQuickBackendExecutor;
 import dexfuzz.executors.Device;
 import dexfuzz.executors.Executor;
 import dexfuzz.executors.Mips64InterpreterExecutor;
 import dexfuzz.executors.Mips64OptimizingBackendExecutor;
-import dexfuzz.executors.Mips64QuickBackendExecutor;
 import dexfuzz.executors.MipsInterpreterExecutor;
 import dexfuzz.executors.MipsOptimizingBackendExecutor;
-import dexfuzz.executors.MipsQuickBackendExecutor;
 import dexfuzz.executors.X86InterpreterExecutor;
 import dexfuzz.executors.X86OptimizingBackendExecutor;
-import dexfuzz.executors.X86QuickBackendExecutor;
 import dexfuzz.executors.X86_64InterpreterExecutor;
 import dexfuzz.executors.X86_64OptimizingBackendExecutor;
-import dexfuzz.executors.X86_64QuickBackendExecutor;
 import dexfuzz.listeners.BaseListener;
 import dexfuzz.program.Mutation;
 import dexfuzz.program.Program;
@@ -121,18 +115,13 @@
     }
   }
 
-  private void addExecutorsForArchitecture(Device device, Class<? extends Executor> quick,
-      Class<? extends Executor> optimizing, Class<? extends Executor> interpreter) {
-    // NB: Currently QuickBackend MUST come immediately before same arch's Interpreter.
+  private void addExecutorsForArchitecture(Device device, Class<? extends Executor> optimizing,
+      Class<? extends Executor> interpreter) {
+    // NB: Currently OptimizingBackend MUST come immediately before same arch's Interpreter.
     // This is because interpreter execution relies on there being an OAT file already
     // created to produce correct debug information. Otherwise we will see
     // false-positive divergences.
     try {
-      if (Options.useQuick) {
-        Constructor<? extends Executor> constructor =
-            quick.getConstructor(BaseListener.class, Device.class);
-        executors.add(constructor.newInstance(listener, device));
-      }
       if (Options.useOptimizing) {
         Constructor<? extends Executor> constructor =
             optimizing.getConstructor(BaseListener.class, Device.class);
@@ -165,33 +154,33 @@
     }
 
     if (Options.useArchArm64) {
-      addExecutorsForArchitecture(device, Arm64QuickBackendExecutor.class,
-          Arm64OptimizingBackendExecutor.class, Arm64InterpreterExecutor.class);
+      addExecutorsForArchitecture(device, Arm64OptimizingBackendExecutor.class,
+          Arm64InterpreterExecutor.class);
     }
 
     if (Options.useArchArm) {
-      addExecutorsForArchitecture(device, ArmQuickBackendExecutor.class,
-          ArmOptimizingBackendExecutor.class, ArmInterpreterExecutor.class);
+      addExecutorsForArchitecture(device, ArmOptimizingBackendExecutor.class,
+          ArmInterpreterExecutor.class);
     }
 
     if (Options.useArchX86_64) {
-      addExecutorsForArchitecture(device, X86_64QuickBackendExecutor.class,
-          X86_64OptimizingBackendExecutor.class, X86_64InterpreterExecutor.class);
+      addExecutorsForArchitecture(device, X86_64OptimizingBackendExecutor.class,
+          X86_64InterpreterExecutor.class);
     }
 
     if (Options.useArchX86) {
-      addExecutorsForArchitecture(device, X86QuickBackendExecutor.class,
-          X86OptimizingBackendExecutor.class, X86InterpreterExecutor.class);
+      addExecutorsForArchitecture(device, X86OptimizingBackendExecutor.class,
+          X86InterpreterExecutor.class);
     }
 
     if (Options.useArchMips64) {
-      addExecutorsForArchitecture(device, Mips64QuickBackendExecutor.class,
-          Mips64OptimizingBackendExecutor.class, Mips64InterpreterExecutor.class);
+      addExecutorsForArchitecture(device, Mips64OptimizingBackendExecutor.class,
+          Mips64InterpreterExecutor.class);
     }
 
     if (Options.useArchMips) {
-      addExecutorsForArchitecture(device, MipsQuickBackendExecutor.class,
-          MipsOptimizingBackendExecutor.class, MipsInterpreterExecutor.class);
+      addExecutorsForArchitecture(device, MipsOptimizingBackendExecutor.class,
+          MipsInterpreterExecutor.class);
     }
 
     // Add the first backend as the golden executor for self-divergence tests.
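Note: addExecutorsForArchitecture instantiates whichever executor classes are enabled through their shared (BaseListener, Device) constructor. A minimal sketch of that reflective pattern; Listener and Device here are stand-ins, not the dexfuzz types.

    import java.lang.reflect.Constructor;

    final class ReflectiveFactory {
      public interface Listener {}
      public interface Device {}

      // All executors expose a public (Listener, Device) constructor, so one
      // generic helper can instantiate any enabled subclass by its Class token.
      public static <T> T create(Class<T> cls, Listener listener, Device device)
          throws ReflectiveOperationException {
        Constructor<T> ctor = cls.getConstructor(Listener.class, Device.class);
        return ctor.newInstance(listener, device);
      }
    }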