Merge "MIPS64: Implement table-based packed switch"
diff --git a/build/art.go b/build/art.go
index 0af1767..e6e0544 100644
--- a/build/art.go
+++ b/build/art.go
@@ -58,7 +58,7 @@
 		asflags = append(asflags, "-DART_HEAP_POISONING=1")
 	}
 
-	if envTrue(ctx, "ART_USE_READ_BARRIER") {
+	if envTrue(ctx, "ART_USE_READ_BARRIER") || ctx.AConfig().ArtUseReadBarrier() {
 		// Used to change the read barrier type. Valid values are BAKER, BROOKS, TABLELOOKUP.
 		// The default is BAKER.
 		barrierType := envDefault(ctx, "ART_READ_BARRIER_TYPE", "BAKER")
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index 9d39bf2..00a7d44 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -46,9 +46,7 @@
 void VerificationResults::ProcessVerifiedMethod(verifier::MethodVerifier* method_verifier) {
   DCHECK(method_verifier != nullptr);
   MethodReference ref = method_verifier->GetMethodReference();
-  bool compile = IsCandidateForCompilation(ref, method_verifier->GetAccessFlags());
-  std::unique_ptr<const VerifiedMethod> verified_method(
-      VerifiedMethod::Create(method_verifier, compile));
+  std::unique_ptr<const VerifiedMethod> verified_method(VerifiedMethod::Create(method_verifier));
   if (verified_method == nullptr) {
     // We'll punt this later.
     return;
@@ -84,7 +82,6 @@
     // TODO: Investigate why are we doing the work again for this method and try to avoid it.
     LOG(WARNING) << "Method processed more than once: " << ref.PrettyMethod();
     if (!Runtime::Current()->UseJitCompilation()) {
-      DCHECK_EQ(existing->GetDevirtMap().size(), verified_method->GetDevirtMap().size());
       DCHECK_EQ(existing->GetSafeCastSet().size(), verified_method->GetSafeCastSet().size());
     }
     // Let the unique_ptr delete the new verified method since there was already an existing one
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index a5979cc..cbca333 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -42,25 +42,12 @@
       has_runtime_throw_(has_runtime_throw) {
 }
 
-const VerifiedMethod* VerifiedMethod::Create(verifier::MethodVerifier* method_verifier,
-                                             bool compile) {
+const VerifiedMethod* VerifiedMethod::Create(verifier::MethodVerifier* method_verifier) {
+  DCHECK(Runtime::Current()->IsAotCompiler());
   std::unique_ptr<VerifiedMethod> verified_method(
       new VerifiedMethod(method_verifier->GetEncounteredFailureTypes(),
                          method_verifier->HasInstructionThatWillThrow()));
 
-  if (compile) {
-    // TODO: move this out when DEX-to-DEX supports devirtualization.
-    if (method_verifier->HasVirtualOrInterfaceInvokes()) {
-      verified_method->GenerateDevirtMap(method_verifier);
-    }
-
-    // Only need dequicken info for JIT so far.
-    if (Runtime::Current()->UseJitCompilation() &&
-        !verified_method->GenerateDequickenMap(method_verifier)) {
-      return nullptr;
-    }
-  }
-
   if (method_verifier->HasCheckCasts()) {
     verified_method->GenerateSafeCastSet(method_verifier);
   }
@@ -68,140 +55,10 @@
   return verified_method.release();
 }
 
-const MethodReference* VerifiedMethod::GetDevirtTarget(uint32_t dex_pc) const {
-  auto it = devirt_map_.find(dex_pc);
-  return (it != devirt_map_.end()) ? &it->second : nullptr;
-}
-
-const DexFileReference* VerifiedMethod::GetDequickenIndex(uint32_t dex_pc) const {
-  DCHECK(Runtime::Current()->UseJitCompilation());
-  auto it = dequicken_map_.find(dex_pc);
-  return (it != dequicken_map_.end()) ? &it->second : nullptr;
-}
-
 bool VerifiedMethod::IsSafeCast(uint32_t pc) const {
   return std::binary_search(safe_cast_set_.begin(), safe_cast_set_.end(), pc);
 }
 
-bool VerifiedMethod::GenerateDequickenMap(verifier::MethodVerifier* method_verifier) {
-  if (method_verifier->HasFailures()) {
-    return false;
-  }
-  const DexFile::CodeItem* code_item = method_verifier->CodeItem();
-  const uint16_t* insns = code_item->insns_;
-  const Instruction* inst = Instruction::At(insns);
-  const Instruction* end = Instruction::At(insns + code_item->insns_size_in_code_units_);
-  for (; inst < end; inst = inst->Next()) {
-    const bool is_virtual_quick = inst->Opcode() == Instruction::INVOKE_VIRTUAL_QUICK;
-    const bool is_range_quick = inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK;
-    if (is_virtual_quick || is_range_quick) {
-      uint32_t dex_pc = inst->GetDexPc(insns);
-      verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
-      ArtMethod* method =
-          method_verifier->GetQuickInvokedMethod(inst, line, is_range_quick, true);
-      if (method == nullptr) {
-        // It can be null if the line wasn't verified since it was unreachable.
-        return false;
-      }
-      // The verifier must know what the type of the object was or else we would have gotten a
-      // failure. Put the dex method index in the dequicken map since we need this to get number of
-      // arguments in the compiler.
-      dequicken_map_.Put(dex_pc, DexFileReference(method->GetDexFile(),
-                                                  method->GetDexMethodIndex()));
-    } else if (IsInstructionIGetQuickOrIPutQuick(inst->Opcode())) {
-      uint32_t dex_pc = inst->GetDexPc(insns);
-      verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
-      ArtField* field = method_verifier->GetQuickFieldAccess(inst, line);
-      if (field == nullptr) {
-        // It can be null if the line wasn't verified since it was unreachable.
-        return false;
-      }
-      // The verifier must know what the type of the field was or else we would have gotten a
-      // failure. Put the dex field index in the dequicken map since we need this for lowering
-      // in the compiler.
-      // TODO: Putting a field index in a method reference is gross.
-      dequicken_map_.Put(dex_pc, DexFileReference(field->GetDexFile(), field->GetDexFieldIndex()));
-    }
-  }
-  return true;
-}
-
-void VerifiedMethod::GenerateDevirtMap(verifier::MethodVerifier* method_verifier) {
-  // It is risky to rely on reg_types for sharpening in cases of soft
-  // verification, we might end up sharpening to a wrong implementation. Just abort.
-  if (method_verifier->HasFailures()) {
-    return;
-  }
-
-  const DexFile::CodeItem* code_item = method_verifier->CodeItem();
-  const uint16_t* insns = code_item->insns_;
-  const Instruction* inst = Instruction::At(insns);
-  const Instruction* end = Instruction::At(insns + code_item->insns_size_in_code_units_);
-
-  for (; inst < end; inst = inst->Next()) {
-    const bool is_virtual = inst->Opcode() == Instruction::INVOKE_VIRTUAL ||
-        inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE;
-    const bool is_interface = inst->Opcode() == Instruction::INVOKE_INTERFACE ||
-        inst->Opcode() == Instruction::INVOKE_INTERFACE_RANGE;
-
-    if (!is_interface && !is_virtual) {
-      continue;
-    }
-    // Get reg type for register holding the reference to the object that will be dispatched upon.
-    uint32_t dex_pc = inst->GetDexPc(insns);
-    verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
-    const bool is_range = inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE ||
-        inst->Opcode() == Instruction::INVOKE_INTERFACE_RANGE;
-    const verifier::RegType&
-        reg_type(line->GetRegisterType(method_verifier,
-                                       is_range ? inst->VRegC_3rc() : inst->VRegC_35c()));
-
-    if (!reg_type.HasClass()) {
-      // We will compute devirtualization information only when we know the Class of the reg type.
-      continue;
-    }
-    mirror::Class* reg_class = reg_type.GetClass();
-    if (reg_class->IsInterface()) {
-      // We can't devirtualize when the known type of the register is an interface.
-      continue;
-    }
-    if (reg_class->IsAbstract() && !reg_class->IsArrayClass()) {
-      // We can't devirtualize abstract classes except on arrays of abstract classes.
-      continue;
-    }
-    auto* cl = Runtime::Current()->GetClassLinker();
-    PointerSize pointer_size = cl->GetImagePointerSize();
-    ArtMethod* abstract_method = method_verifier->GetDexCache()->GetResolvedMethod(
-        is_range ? inst->VRegB_3rc() : inst->VRegB_35c(), pointer_size);
-    if (abstract_method == nullptr) {
-      // If the method is not found in the cache this means that it was never found
-      // by ResolveMethodAndCheckAccess() called when verifying invoke_*.
-      continue;
-    }
-    // Find the concrete method.
-    ArtMethod* concrete_method = nullptr;
-    if (is_interface) {
-      concrete_method = reg_type.GetClass()->FindVirtualMethodForInterface(
-          abstract_method, pointer_size);
-    }
-    if (is_virtual) {
-      concrete_method = reg_type.GetClass()->FindVirtualMethodForVirtual(
-          abstract_method, pointer_size);
-    }
-    if (concrete_method == nullptr || !concrete_method->IsInvokable()) {
-      // In cases where concrete_method is not found, or is not invokable, continue to the next
-      // invoke.
-      continue;
-    }
-    if (reg_type.IsPreciseReference() || concrete_method->IsFinal() ||
-        concrete_method->GetDeclaringClass()->IsFinal()) {
-      // If we knew exactly the class being dispatched upon, or if the target method cannot be
-      // overridden record the target to be used in the compiler driver.
-      devirt_map_.Put(dex_pc, concrete_method->ToMethodReference());
-    }
-  }
-}
-
 void VerifiedMethod::GenerateSafeCastSet(verifier::MethodVerifier* method_verifier) {
   /*
    * Walks over the method code and adds any cast instructions in which
@@ -229,7 +86,21 @@
                                                               inst->VRegA_21c()));
       const verifier::RegType& cast_type =
           method_verifier->ResolveCheckedClass(dex::TypeIndex(inst->VRegB_21c()));
-      if (cast_type.IsStrictlyAssignableFrom(reg_type, method_verifier)) {
+      // Pass null for the method verifier to not record the VerifierDeps dependency
+      // if the types are not assignable.
+      if (cast_type.IsStrictlyAssignableFrom(reg_type, /* method_verifier */ nullptr)) {
+        // The types are assignable; we record that dependency in the VerifierDeps so
+        // that if this changes after an OTA, we will re-verify.
+        // We check if reg_type has a class, as the verifier may have inferred it's
+        // 'null'.
+        if (reg_type.HasClass()) {
+          DCHECK(cast_type.HasClass());
+          verifier::VerifierDeps::MaybeRecordAssignability(method_verifier->GetDexFile(),
+                                                           cast_type.GetClass(),
+                                                           reg_type.GetClass(),
+                                                           /* strict */ true,
+                                                           /* assignable */ true);
+        }
         // Verify ordering for push_back() to the sorted vector.
         DCHECK(safe_cast_set_.empty() || safe_cast_set_.back() < dex_pc);
         safe_cast_set_.push_back(dex_pc);
diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h
index ce53417..439e69e 100644
--- a/compiler/dex/verified_method.h
+++ b/compiler/dex/verified_method.h
@@ -39,31 +39,14 @@
   // is better for performance (not just memory usage), especially for large sets.
   typedef std::vector<uint32_t> SafeCastSet;
 
-  // Devirtualization map type maps dex offset to concrete method reference.
-  typedef SafeMap<uint32_t, MethodReference> DevirtualizationMap;
-
-  // Devirtualization map type maps dex offset to field / method idx.
-  typedef SafeMap<uint32_t, DexFileReference> DequickenMap;
-
-  static const VerifiedMethod* Create(verifier::MethodVerifier* method_verifier, bool compile)
+  static const VerifiedMethod* Create(verifier::MethodVerifier* method_verifier)
       REQUIRES_SHARED(Locks::mutator_lock_);
   ~VerifiedMethod() = default;
 
-  const DevirtualizationMap& GetDevirtMap() const {
-    return devirt_map_;
-  }
-
   const SafeCastSet& GetSafeCastSet() const {
     return safe_cast_set_;
   }
 
-  // Returns the devirtualization target method, or null if none.
-  const MethodReference* GetDevirtTarget(uint32_t dex_pc) const;
-
-  // Returns the dequicken field / method for a quick invoke / field get. Returns null if there is
-  // no entry for that dex pc.
-  const DexFileReference* GetDequickenIndex(uint32_t dex_pc) const;
-
   // Returns true if the cast can statically be verified to be redundant
   // by using the check-cast elision peephole optimization in the verifier.
   bool IsSafeCast(uint32_t pc) const;
@@ -82,38 +65,10 @@
   }
 
  private:
-  /*
-   * Generate the GC map for a method that has just been verified (i.e. we're doing this as part of
-   * verification). For type-precise determination we have all the data we need, so we just need to
-   * encode it in some clever fashion.
-   * Stores the data in dex_gc_map_, returns true on success and false on failure.
-   */
-  bool GenerateGcMap(verifier::MethodVerifier* method_verifier);
-
-  // Verify that the GC map associated with method_ is well formed.
-  static void VerifyGcMap(verifier::MethodVerifier* method_verifier,
-                          const std::vector<uint8_t>& data);
-
-  // Compute sizes for GC map data.
-  static void ComputeGcMapSizes(verifier::MethodVerifier* method_verifier,
-                                size_t* gc_points, size_t* ref_bitmap_bits, size_t* log2_max_gc_pc);
-
-  // Generate devirtualizaion map into devirt_map_.
-  void GenerateDevirtMap(verifier::MethodVerifier* method_verifier)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  // Generate dequickening map into dequicken_map_. Returns false if there is an error.
-  bool GenerateDequickenMap(verifier::MethodVerifier* method_verifier)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
   // Generate safe cast set into safe_cast_set_.
   void GenerateSafeCastSet(verifier::MethodVerifier* method_verifier)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  DevirtualizationMap devirt_map_;
-  // Dequicken map is required for compiling quickened byte codes. The quicken maps from
-  // dex PC to dex method index or dex field index based on the instruction.
-  DequickenMap dequicken_map_;
   SafeCastSet safe_cast_set_;
 
   const uint32_t encountered_error_types_;
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index ec1642e..2950266 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -423,7 +423,7 @@
   // Compile:
   // 1) Compile all classes and methods enabled for compilation. May fall back to dex-to-dex
   //    compilation.
-  if (!GetCompilerOptions().VerifyAtRuntime() && !GetCompilerOptions().VerifyOnlyProfile()) {
+  if (GetCompilerOptions().IsAnyMethodCompilationEnabled()) {
     Compile(class_loader, dex_files, timings);
   }
   if (dump_stats_) {
@@ -494,11 +494,15 @@
     // TODO: we unquicken unconditionally, as we don't know
     // if the boot image has changed. How exactly we'll know is under
     // experimentation.
-    TimingLogger::ScopedTiming t("Unquicken", timings);
-    // We do not decompile a RETURN_VOID_NO_BARRIER into a RETURN_VOID, as the quickening
-    // optimization does not depend on the boot image (the optimization relies on not
-    // having final fields in a class, which does not change for an app).
-    Unquicken(dex_files, vdex_file->GetQuickeningInfo(), /* decompile_return_instruction */ false);
+    if (vdex_file->GetQuickeningInfo().size() != 0) {
+      TimingLogger::ScopedTiming t("Unquicken", timings);
+      // We do not decompile a RETURN_VOID_NO_BARRIER into a RETURN_VOID, as the quickening
+      // optimization does not depend on the boot image (the optimization relies on not
+      // having final fields in a class, which does not change for an app).
+      Unquicken(dex_files,
+                vdex_file->GetQuickeningInfo(),
+                /* decompile_return_instruction */ false);
+    }
     Runtime::Current()->GetCompilerCallbacks()->SetVerifierDeps(
         new verifier::VerifierDeps(dex_files, vdex_file->GetVerifierDepsData()));
   }
@@ -510,10 +514,7 @@
     const DexFile& dex_file, const DexFile::ClassDef& class_def)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   auto* const runtime = Runtime::Current();
-  if (runtime->UseJitCompilation() || driver.GetCompilerOptions().VerifyAtRuntime()) {
-    // Verify at runtime shouldn't dex to dex since we didn't resolve of verify.
-    return optimizer::DexToDexCompilationLevel::kDontDexToDexCompile;
-  }
+  DCHECK(driver.GetCompilerOptions().IsAnyMethodCompilationEnabled());
   const char* descriptor = dex_file.GetClassDescriptor(class_def);
   ClassLinker* class_linker = runtime->GetClassLinker();
   mirror::Class* klass = class_linker->FindClass(self, descriptor, class_loader);
@@ -954,24 +955,17 @@
   LoadImageClasses(timings);
   VLOG(compiler) << "LoadImageClasses: " << GetMemoryUsageString(false);
 
-  const bool verification_enabled = compiler_options_->IsVerificationEnabled();
-  const bool never_verify = compiler_options_->NeverVerify();
-  const bool verify_only_profile = compiler_options_->VerifyOnlyProfile();
-
-  // We need to resolve for never_verify since it needs to run dex to dex to add the
-  // RETURN_VOID_NO_BARRIER.
-  // Let the verifier resolve as needed for the verify_only_profile case.
-  if ((never_verify || verification_enabled) && !verify_only_profile) {
+  if (compiler_options_->IsAnyMethodCompilationEnabled()) {
     Resolve(class_loader, dex_files, timings);
     VLOG(compiler) << "Resolve: " << GetMemoryUsageString(false);
   }
 
-  if (never_verify) {
+  if (compiler_options_->AssumeClassesAreVerified()) {
     VLOG(compiler) << "Verify none mode specified, skipping verification.";
     SetVerified(class_loader, dex_files, timings);
   }
 
-  if (!verification_enabled) {
+  if (!compiler_options_->IsVerificationEnabled()) {
     return;
   }
 
@@ -989,7 +983,7 @@
                << "situations. Please check the log.";
   }
 
-  if (!verify_only_profile) {
+  if (compiler_options_->IsAnyMethodCompilationEnabled()) {
     InitializeClasses(class_loader, dex_files, timings);
     VLOG(compiler) << "InitializeClasses: " << GetMemoryUsageString(false);
   }
@@ -1537,90 +1531,6 @@
   }
 }
 
-void CompilerDriver::GetCodeAndMethodForDirectCall(const mirror::Class* referrer_class,
-                                                   ArtMethod* method,
-                                                   uintptr_t* direct_code,
-                                                   uintptr_t* direct_method) {
-  // For direct and static methods compute possible direct_code and direct_method values, ie
-  // an address for the Method* being invoked and an address of the code for that Method*.
-  // For interface calls compute a value for direct_method that is the interface method being
-  // invoked, so this can be passed to the out-of-line runtime support code.
-  *direct_code = 0;
-  *direct_method = 0;
-  Runtime* const runtime = Runtime::Current();
-  gc::Heap* const heap = runtime->GetHeap();
-  auto* cl = runtime->GetClassLinker();
-  bool use_dex_cache = GetCompilerOptions().GetCompilePic();  // Off by default
-  const bool compiling_boot = heap->IsCompilingBoot();
-  // TODO This is somewhat hacky. We should refactor all of this invoke codepath.
-  const bool force_relocations = (compiling_boot ||
-                                  GetCompilerOptions().GetIncludePatchInformation());
-  // TODO: support patching on all architectures.
-  use_dex_cache = use_dex_cache || (force_relocations && !support_boot_image_fixup_);
-  mirror::Class* declaring_class = method->GetDeclaringClass();
-  bool method_code_in_boot = declaring_class->GetClassLoader() == nullptr;
-  if (!use_dex_cache) {
-    if (!method_code_in_boot) {
-      use_dex_cache = true;
-    } else if (method->IsStatic() &&
-               declaring_class != referrer_class &&
-               !declaring_class->IsInitialized()) {
-      // Ensure we run the clinit trampoline unless we are invoking a static method in the same
-      // class.
-      use_dex_cache = true;
-    }
-  }
-  if (runtime->UseJitCompilation()) {
-    // If we are the JIT, then don't allow a direct call to the interpreter bridge since this will
-    // never be updated even after we compile the method.
-    if (cl->IsQuickToInterpreterBridge(
-        reinterpret_cast<const void*>(compiler_->GetEntryPointOf(method)))) {
-      use_dex_cache = true;
-    }
-  }
-
-  if (!use_dex_cache && force_relocations) {
-    bool is_in_image;
-    if (GetCompilerOptions().IsBootImage()) {
-      is_in_image = IsImageClass(method->GetDeclaringClassDescriptor());
-    } else {
-      is_in_image = instruction_set_ != kX86 && instruction_set_ != kX86_64 &&
-                    heap->FindSpaceFromObject(method->GetDeclaringClass(), false)->IsImageSpace() &&
-                    !cl->IsQuickToInterpreterBridge(
-                        reinterpret_cast<const void*>(compiler_->GetEntryPointOf(method)));
-    }
-    if (!is_in_image) {
-      // We can only branch directly to Methods that are resolved in the DexCache.
-      // Otherwise we won't invoke the resolution trampoline.
-      use_dex_cache = true;
-    }
-  }
-
-  if (!use_dex_cache) {
-    bool method_in_image = false;
-    const std::vector<gc::space::ImageSpace*>& image_spaces = heap->GetBootImageSpaces();
-    for (gc::space::ImageSpace* image_space : image_spaces) {
-      const auto& method_section = image_space->GetImageHeader().GetMethodsSection();
-      if (method_section.Contains(reinterpret_cast<uint8_t*>(method) - image_space->Begin())) {
-        method_in_image = true;
-        break;
-      }
-    }
-    if (method_in_image || compiling_boot || runtime->UseJitCompilation()) {
-      // We know we must be able to get to the method in the image, so use that pointer.
-      // In the case where we are the JIT, we can always use direct pointers since we know where
-      // the method and its code are / will be. We don't sharpen to interpreter bridge since we
-      // check IsQuickToInterpreterBridge above.
-      CHECK(!method->IsAbstract());
-      *direct_method = force_relocations ? -1 : reinterpret_cast<uintptr_t>(method);
-      *direct_code = force_relocations ? -1 : compiler_->GetEntryPointOf(method);
-    } else {
-      // Set the code and rely on the dex cache for the method.
-      *direct_code = force_relocations ? -1 : compiler_->GetEntryPointOf(method);
-    }
-  }
-}
-
 const VerifiedMethod* CompilerDriver::GetVerifiedMethod(const DexFile* dex_file,
                                                         uint32_t method_idx) const {
   MethodReference ref(dex_file, method_idx);
@@ -2069,7 +1979,7 @@
         for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) {
           const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
           if (set.find(class_def.class_idx_) == set.end()) {
-            if (GetCompilerOptions().VerifyOnlyProfile()) {
+            if (!GetCompilerOptions().IsAnyMethodCompilationEnabled()) {
               // Just update the compiled_classes_ map. The compiler doesn't need to resolve
               // the type.
               compiled_classes_.Overwrite(
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index eb69931..2e3b7c8 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -378,13 +378,6 @@
     return current_dex_to_dex_methods_;
   }
 
-  // Compute constant code and method pointers when possible.
-  void GetCodeAndMethodForDirectCall(const mirror::Class* referrer_class,
-                                     ArtMethod* method,
-                                     /* out */ uintptr_t* direct_code,
-                                     /* out */ uintptr_t* direct_method)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
  private:
   // Return whether the declaring class of `resolved_member` is
   // available to `referrer_class` for read or write access using two
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 9c62f80..6894cd5 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -109,7 +109,7 @@
     return CompilerFilter::IsVerificationEnabled(compiler_filter_);
   }
 
-  bool NeverVerify() const {
+  bool AssumeClassesAreVerified() const {
     return compiler_filter_ == CompilerFilter::kVerifyNone;
   }
 
@@ -117,6 +117,10 @@
     return compiler_filter_ == CompilerFilter::kVerifyProfile;
   }
 
+  bool IsAnyMethodCompilationEnabled() const {
+    return CompilerFilter::IsAnyMethodCompilationEnabled(compiler_filter_);
+  }
+
   size_t GetHugeMethodThreshold() const {
     return huge_method_threshold_;
   }
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 2686abc..b0225a3 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -254,6 +254,7 @@
             driver->GetInstructionSetFeatures(),
             &key_value_store,
             /* verify */ false,           // Dex files may be dex-to-dex-ed, don't verify.
+            /* update_input_vdex */ false,
             &cur_opened_dex_files_map,
             &cur_opened_dex_files);
         ASSERT_TRUE(dex_files_ok);
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index cc7df1c..c537483 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -27,7 +27,6 @@
 #include <string>
 #include <ostream>
 
-#include "art_method.h"
 #include "base/bit_utils.h"
 #include "base/dchecked_vector.h"
 #include "base/enums.h"
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index edc93ab..86d92ff 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -198,6 +198,7 @@
                                          compiler_driver_->GetInstructionSetFeatures(),
                                          &key_value_store,
                                          verify,
+                                         /* update_input_vdex */ false,
                                          &opened_dex_files_map,
                                          &opened_dex_files)) {
       return false;
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index bebd5f5..a9da09c 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -483,6 +483,7 @@
     const InstructionSetFeatures* instruction_set_features,
     SafeMap<std::string, std::string>* key_value_store,
     bool verify,
+    bool update_input_vdex,
     /*out*/ std::unique_ptr<MemMap>* opened_dex_files_map,
     /*out*/ std::vector<std::unique_ptr<const DexFile>>* opened_dex_files) {
   CHECK(write_state_ == WriteState::kAddingDexFileSources);
@@ -511,15 +512,15 @@
   if (kIsVdexEnabled) {
     std::unique_ptr<BufferedOutputStream> vdex_out(
         MakeUnique<BufferedOutputStream>(MakeUnique<FileOutputStream>(vdex_file)));
-
     // Write DEX files into VDEX, mmap and open them.
-    if (!WriteDexFiles(vdex_out.get(), vdex_file) ||
+    if (!WriteDexFiles(vdex_out.get(), vdex_file, update_input_vdex) ||
         !OpenDexFiles(vdex_file, verify, &dex_files_map, &dex_files)) {
       return false;
     }
   } else {
+    DCHECK(!update_input_vdex);
     // Write DEX files into OAT, mmap and open them.
-    if (!WriteDexFiles(oat_rodata, vdex_file) ||
+    if (!WriteDexFiles(oat_rodata, vdex_file, update_input_vdex) ||
         !OpenDexFiles(vdex_file, verify, &dex_files_map, &dex_files)) {
       return false;
     }
@@ -1468,30 +1469,32 @@
       if (UNLIKELY(!visitor->StartClass(dex_file, class_def_index))) {
         return false;
       }
-      const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
-      const uint8_t* class_data = dex_file->GetClassData(class_def);
-      if (class_data != nullptr) {  // ie not an empty class, such as a marker interface
-        ClassDataItemIterator it(*dex_file, class_data);
-        while (it.HasNextStaticField()) {
-          it.Next();
-        }
-        while (it.HasNextInstanceField()) {
-          it.Next();
-        }
-        size_t class_def_method_index = 0u;
-        while (it.HasNextDirectMethod()) {
-          if (!visitor->VisitMethod(class_def_method_index, it)) {
-            return false;
+      if (compiler_driver_->GetCompilerOptions().IsAnyMethodCompilationEnabled()) {
+        const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
+        const uint8_t* class_data = dex_file->GetClassData(class_def);
+        if (class_data != nullptr) {  // ie not an empty class, such as a marker interface
+          ClassDataItemIterator it(*dex_file, class_data);
+          while (it.HasNextStaticField()) {
+            it.Next();
           }
-          ++class_def_method_index;
-          it.Next();
-        }
-        while (it.HasNextVirtualMethod()) {
-          if (UNLIKELY(!visitor->VisitMethod(class_def_method_index, it))) {
-            return false;
+          while (it.HasNextInstanceField()) {
+            it.Next();
           }
-          ++class_def_method_index;
-          it.Next();
+          size_t class_def_method_index = 0u;
+          while (it.HasNextDirectMethod()) {
+            if (!visitor->VisitMethod(class_def_method_index, it)) {
+              return false;
+            }
+            ++class_def_method_index;
+            it.Next();
+          }
+          while (it.HasNextVirtualMethod()) {
+            if (UNLIKELY(!visitor->VisitMethod(class_def_method_index, it))) {
+              return false;
+            }
+            ++class_def_method_index;
+            it.Next();
+          }
         }
       }
       if (UNLIKELY(!visitor->EndClass())) {
@@ -1548,6 +1551,9 @@
 }
 
 size_t OatWriter::InitOatMaps(size_t offset) {
+  if (!compiler_driver_->GetCompilerOptions().IsAnyMethodCompilationEnabled()) {
+    return offset;
+  }
   InitMapMethodVisitor visitor(this, offset);
   bool success = VisitDexMethods(&visitor);
   DCHECK(success);
@@ -1594,6 +1600,9 @@
 }
 
 size_t OatWriter::InitOatCodeDexFiles(size_t offset) {
+  if (!compiler_driver_->GetCompilerOptions().IsAnyMethodCompilationEnabled()) {
+    return offset;
+  }
   InitCodeMethodVisitor code_visitor(this, offset, vdex_quickening_info_offset_);
   bool success = VisitDexMethods(&code_visitor);
   DCHECK(success);
@@ -1745,19 +1754,24 @@
     return false;
   }
 
-  WriteQuickeningInfoMethodVisitor visitor(this, vdex_out, start_offset);
-  if (!VisitDexMethods(&visitor)) {
-    PLOG(ERROR) << "Failed to write the vdex quickening info. File: " << vdex_out->GetLocation();
-    return false;
+  if (compiler_driver_->GetCompilerOptions().IsAnyMethodCompilationEnabled()) {
+    WriteQuickeningInfoMethodVisitor visitor(this, vdex_out, start_offset);
+    if (!VisitDexMethods(&visitor)) {
+      PLOG(ERROR) << "Failed to write the vdex quickening info. File: " << vdex_out->GetLocation();
+      return false;
+    }
+
+    if (!vdex_out->Flush()) {
+      PLOG(ERROR) << "Failed to flush stream after writing quickening info."
+                  << " File: " << vdex_out->GetLocation();
+      return false;
+    }
+    size_quickening_info_ = visitor.GetNumberOfWrittenBytes();
+  } else {
+    // We know we did not quicken.
+    size_quickening_info_ = 0;
   }
 
-  if (!vdex_out->Flush()) {
-    PLOG(ERROR) << "Failed to flush stream after writing quickening info."
-                << " File: " << vdex_out->GetLocation();
-    return false;
-  }
-
-  size_quickening_info_ = visitor.GetNumberOfWrittenBytes();
   vdex_size_ += size_quickening_info_;
   return true;
 }
@@ -2096,47 +2110,56 @@
   return true;
 }
 
-bool OatWriter::WriteDexFiles(OutputStream* out, File* file) {
+bool OatWriter::WriteDexFiles(OutputStream* out, File* file, bool update_input_vdex) {
   TimingLogger::ScopedTiming split("Write Dex files", timings_);
 
   vdex_dex_files_offset_ = vdex_size_;
 
   // Write dex files.
   for (OatDexFile& oat_dex_file : oat_dex_files_) {
-    if (!WriteDexFile(out, file, &oat_dex_file)) {
+    if (!WriteDexFile(out, file, &oat_dex_file, update_input_vdex)) {
       return false;
     }
   }
 
-  // Close sources.
+  CloseSources();
+  return true;
+}
+
+void OatWriter::CloseSources() {
   for (OatDexFile& oat_dex_file : oat_dex_files_) {
     oat_dex_file.source_.Clear();  // Get rid of the reference, it's about to be invalidated.
   }
   zipped_dex_files_.clear();
   zip_archives_.clear();
   raw_dex_files_.clear();
-  return true;
 }
 
-bool OatWriter::WriteDexFile(OutputStream* out, File* file, OatDexFile* oat_dex_file) {
+bool OatWriter::WriteDexFile(OutputStream* out,
+                             File* file,
+                             OatDexFile* oat_dex_file,
+                             bool update_input_vdex) {
   if (!SeekToDexFile(out, file, oat_dex_file)) {
     return false;
   }
   if (profile_compilation_info_ != nullptr) {
+    DCHECK(!update_input_vdex);
     if (!LayoutAndWriteDexFile(out, oat_dex_file)) {
       return false;
     }
   } else if (oat_dex_file->source_.IsZipEntry()) {
+    DCHECK(!update_input_vdex);
     if (!WriteDexFile(out, file, oat_dex_file, oat_dex_file->source_.GetZipEntry())) {
       return false;
     }
   } else if (oat_dex_file->source_.IsRawFile()) {
+    DCHECK(!update_input_vdex);
     if (!WriteDexFile(out, file, oat_dex_file, oat_dex_file->source_.GetRawFile())) {
       return false;
     }
   } else {
     DCHECK(oat_dex_file->source_.IsRawData());
-    if (!WriteDexFile(out, oat_dex_file, oat_dex_file->source_.GetRawData())) {
+    if (!WriteDexFile(out, oat_dex_file, oat_dex_file->source_.GetRawData(), update_input_vdex)) {
       return false;
     }
   }
@@ -2146,6 +2169,7 @@
     DCHECK_EQ(vdex_size_, oat_dex_file->dex_file_offset_);
     vdex_size_ += oat_dex_file->dex_file_size_;
   } else {
+    DCHECK(!update_input_vdex);
     DCHECK_EQ(oat_size_, oat_dex_file->dex_file_offset_);
     oat_size_ += oat_dex_file->dex_file_size_;
   }
@@ -2216,7 +2240,7 @@
   DexLayout dex_layout(options, profile_compilation_info_, nullptr);
   dex_layout.ProcessDexFile(location.c_str(), dex_file.get(), 0);
   std::unique_ptr<MemMap> mem_map(dex_layout.GetAndReleaseMemMap());
-  if (!WriteDexFile(out, oat_dex_file, mem_map->Begin())) {
+  if (!WriteDexFile(out, oat_dex_file, mem_map->Begin(), /* update_input_vdex */ false)) {
     return false;
   }
   // Set the checksum of the new oat dex file to be the original file's checksum.
@@ -2373,22 +2397,27 @@
 
 bool OatWriter::WriteDexFile(OutputStream* out,
                              OatDexFile* oat_dex_file,
-                             const uint8_t* dex_file) {
+                             const uint8_t* dex_file,
+                             bool update_input_vdex) {
   // Note: The raw data has already been checked to contain the header
   // and all the data that the header specifies as the file size.
   DCHECK(dex_file != nullptr);
   DCHECK(ValidateDexFileHeader(dex_file, oat_dex_file->GetLocation()));
   const UnalignedDexFileHeader* header = AsUnalignedDexFileHeader(dex_file);
 
-  if (!out->WriteFully(dex_file, header->file_size_)) {
-    PLOG(ERROR) << "Failed to write dex file " << oat_dex_file->GetLocation()
-                << " to " << out->GetLocation();
-    return false;
-  }
-  if (!out->Flush()) {
-    PLOG(ERROR) << "Failed to flush stream after writing dex file."
-                << " File: " << oat_dex_file->GetLocation();
-    return false;
+  if (update_input_vdex) {
+    // The vdex already contains the dex code, no need to write it again.
+  } else {
+    if (!out->WriteFully(dex_file, header->file_size_)) {
+      PLOG(ERROR) << "Failed to write dex file " << oat_dex_file->GetLocation()
+                  << " to " << out->GetLocation();
+      return false;
+    }
+    if (!out->Flush()) {
+      PLOG(ERROR) << "Failed to flush stream after writing dex file."
+                  << " File: " << oat_dex_file->GetLocation();
+      return false;
+    }
   }
 
   // Update dex file size and resize class offsets in the OatDexFile.
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index da221d6..8d087f4 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -158,12 +158,15 @@
   // Supporting data structures are written into the .rodata section of the oat file.
   // The `verify` setting dictates whether the dex file verifier should check the dex files.
   // This is generally the case, and should only be false for tests.
+  // If `update_input_vdex` is true, then this method won't actually write the dex files,
+  // and the compiler will just re-use the existing vdex file.
   bool WriteAndOpenDexFiles(File* vdex_file,
                             OutputStream* oat_rodata,
                             InstructionSet instruction_set,
                             const InstructionSetFeatures* instruction_set_features,
                             SafeMap<std::string, std::string>* key_value_store,
                             bool verify,
+                            bool update_input_vdex,
                             /*out*/ std::unique_ptr<MemMap>* opened_dex_files_map,
                             /*out*/ std::vector<std::unique_ptr<const DexFile>>* opened_dex_files);
   bool WriteQuickeningInfo(OutputStream* vdex_out);
@@ -263,8 +266,13 @@
   // with a given DexMethodVisitor.
   bool VisitDexMethods(DexMethodVisitor* visitor);
 
-  bool WriteDexFiles(OutputStream* out, File* file);
-  bool WriteDexFile(OutputStream* out, File* file, OatDexFile* oat_dex_file);
+  // If `update_input_vdex` is true, then this method won't actually write the dex files,
+  // and the compiler will just re-use the existing vdex file.
+  bool WriteDexFiles(OutputStream* out, File* file, bool update_input_vdex);
+  bool WriteDexFile(OutputStream* out,
+                    File* file,
+                    OatDexFile* oat_dex_file,
+                    bool update_input_vdex);
   bool SeekToDexFile(OutputStream* out, File* file, OatDexFile* oat_dex_file);
   bool LayoutAndWriteDexFile(OutputStream* out, OatDexFile* oat_dex_file);
   bool WriteDexFile(OutputStream* out,
@@ -275,7 +283,10 @@
                     File* file,
                     OatDexFile* oat_dex_file,
                     File* dex_file);
-  bool WriteDexFile(OutputStream* out, OatDexFile* oat_dex_file, const uint8_t* dex_file);
+  bool WriteDexFile(OutputStream* out,
+                    OatDexFile* oat_dex_file,
+                    const uint8_t* dex_file,
+                    bool update_input_vdex);
   bool OpenDexFiles(File* file,
                     bool verify,
                     /*out*/ std::unique_ptr<MemMap>* opened_dex_files_map,
@@ -306,6 +317,7 @@
                              const std::vector<std::unique_ptr<const DexFile>>& opened_dex_files);
   bool WriteCodeAlignment(OutputStream* out, uint32_t aligned_code_delta);
   void SetMultiOatRelativePatcherAdjustment();
+  void CloseSources();
 
   enum class WriteState {
     kAddingDexFileSources,
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 8104613..8a7f6d3 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1201,11 +1201,6 @@
       isa_features_(isa_features),
       uint32_literals_(std::less<uint32_t>(),
                        graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
-      method_patches_(MethodReferenceComparator(),
-                      graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
-      call_patches_(MethodReferenceComparator(),
-                    graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
-      relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       boot_image_string_patches_(StringReferenceValueComparator(),
                                  graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
@@ -7139,7 +7134,7 @@
 
 HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARM::GetSupportedInvokeStaticOrDirectDispatch(
       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-      HInvokeStaticOrDirect* invoke) {
+      HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
   HInvokeStaticOrDirect::DispatchInfo dispatch_info = desired_dispatch_info;
   // We disable pc-relative load when there is an irreducible loop, as the optimization
   // is incompatible with it.
@@ -7151,24 +7146,6 @@
     dispatch_info.method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod;
   }
 
-  if (dispatch_info.code_ptr_location == HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative) {
-    const DexFile& outer_dex_file = GetGraph()->GetDexFile();
-    if (&outer_dex_file != invoke->GetTargetMethod().dex_file) {
-      // Calls across dex files are more likely to exceed the available BL range,
-      // so use absolute patch with fixup if available and kCallArtMethod otherwise.
-      HInvokeStaticOrDirect::CodePtrLocation code_ptr_location =
-          (desired_dispatch_info.method_load_kind ==
-           HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup)
-          ? HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup
-          : HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
-      return HInvokeStaticOrDirect::DispatchInfo {
-        dispatch_info.method_load_kind,
-        code_ptr_location,
-        dispatch_info.method_load_data,
-        0u
-      };
-    }
-  }
   return dispatch_info;
 }
 
@@ -7199,20 +7176,6 @@
 }
 
 void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
-  // For better instruction scheduling we load the direct code pointer before the method pointer.
-  switch (invoke->GetCodePtrLocation()) {
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
-      // LR = code address from literal pool with link-time patch.
-      __ LoadLiteral(LR, DeduplicateMethodCodeLiteral(invoke->GetTargetMethod()));
-      break;
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
-      // LR = invoke->GetDirectCodePtr();
-      __ LoadImmediate(LR, invoke->GetDirectCodePtr());
-      break;
-    default:
-      break;
-  }
-
   Location callee_method = temp;  // For all kinds except kRecursive, callee will be in temp.
   switch (invoke->GetMethodLoadKind()) {
     case HInvokeStaticOrDirect::MethodLoadKind::kStringInit: {
@@ -7228,10 +7191,6 @@
     case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
       __ LoadImmediate(temp.AsRegister<Register>(), invoke->GetMethodAddress());
       break;
-    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
-      __ LoadLiteral(temp.AsRegister<Register>(),
-                     DeduplicateMethodAddressLiteral(invoke->GetTargetMethod()));
-      break;
     case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
       HArmDexCacheArraysBase* base =
           invoke->InputAt(invoke->GetSpecialInputIndex())->AsArmDexCacheArraysBase();
@@ -7270,19 +7229,6 @@
     case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
       __ bl(GetFrameEntryLabel());
       break;
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
-      relative_call_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
-                                          invoke->GetTargetMethod().dex_method_index);
-      __ BindTrackedLabel(&relative_call_patches_.back().label);
-      // Arbitrarily branch to the BL itself, override at link time.
-      __ bl(&relative_call_patches_.back().label);
-      break;
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
-      // LR prepared above for better instruction scheduling.
-      // LR()
-      __ blx(LR);
-      break;
     case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
       // LR = callee_method->entry_point_from_quick_compiled_code_
       __ LoadFromOffset(
@@ -7410,9 +7356,6 @@
 void CodeGeneratorARM::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
   DCHECK(linker_patches->empty());
   size_t size =
-      method_patches_.size() +
-      call_patches_.size() +
-      relative_call_patches_.size() +
       /* MOVW+MOVT for each entry */ 2u * pc_relative_dex_cache_patches_.size() +
       boot_image_string_patches_.size() +
       /* MOVW+MOVT for each entry */ 2u * pc_relative_string_patches_.size() +
@@ -7420,29 +7363,6 @@
       /* MOVW+MOVT for each entry */ 2u * pc_relative_type_patches_.size() +
       boot_image_address_patches_.size();
   linker_patches->reserve(size);
-  for (const auto& entry : method_patches_) {
-    const MethodReference& target_method = entry.first;
-    Literal* literal = entry.second;
-    DCHECK(literal->GetLabel()->IsBound());
-    uint32_t literal_offset = literal->GetLabel()->Position();
-    linker_patches->push_back(LinkerPatch::MethodPatch(literal_offset,
-                                                       target_method.dex_file,
-                                                       target_method.dex_method_index));
-  }
-  for (const auto& entry : call_patches_) {
-    const MethodReference& target_method = entry.first;
-    Literal* literal = entry.second;
-    DCHECK(literal->GetLabel()->IsBound());
-    uint32_t literal_offset = literal->GetLabel()->Position();
-    linker_patches->push_back(LinkerPatch::CodePatch(literal_offset,
-                                                     target_method.dex_file,
-                                                     target_method.dex_method_index));
-  }
-  for (const PatchInfo<Label>& info : relative_call_patches_) {
-    uint32_t literal_offset = info.label.Position();
-    linker_patches->push_back(
-        LinkerPatch::RelativeCodePatch(literal_offset, &info.dex_file, info.index));
-  }
   EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
                                                                linker_patches);
   for (const auto& entry : boot_image_string_patches_) {
@@ -7494,14 +7414,6 @@
       [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
 }
 
-Literal* CodeGeneratorARM::DeduplicateMethodAddressLiteral(MethodReference target_method) {
-  return DeduplicateMethodLiteral(target_method, &method_patches_);
-}
-
-Literal* CodeGeneratorARM::DeduplicateMethodCodeLiteral(MethodReference target_method) {
-  return DeduplicateMethodLiteral(target_method, &call_patches_);
-}
-
 void LocationsBuilderARM::VisitMultiplyAccumulate(HMultiplyAccumulate* instr) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(instr, LocationSummary::kNoCall);
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 605169d..6435851 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -607,8 +607,6 @@
 
   Literal* DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map);
   Literal* DeduplicateMethodLiteral(MethodReference target_method, MethodToLiteralMap* map);
-  Literal* DeduplicateMethodAddressLiteral(MethodReference target_method);
-  Literal* DeduplicateMethodCodeLiteral(MethodReference target_method);
   PcRelativePatchInfo* NewPcRelativePatch(const DexFile& dex_file,
                                           uint32_t offset_or_index,
                                           ArenaDeque<PcRelativePatchInfo>* patches);
@@ -627,12 +625,6 @@
 
   // Deduplication map for 32-bit literals, used for non-patchable boot image addresses.
   Uint32ToLiteralMap uint32_literals_;
-  // Method patch info, map MethodReference to a literal for method address and method code.
-  MethodToLiteralMap method_patches_;
-  MethodToLiteralMap call_patches_;
-  // Relative call patch info.
-  // Using ArenaDeque<> which retains element addresses on push/emplace_back().
-  ArenaDeque<PatchInfo<Label>> relative_call_patches_;
   // PC-relative patch info for each HArmDexCacheArraysBase.
   ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
   // Deduplication map for boot string literals for kBootImageLinkTimeAddress.
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 5cff303..5c33fe1 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1147,11 +1147,6 @@
                        graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       uint64_literals_(std::less<uint64_t>(),
                        graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
-      method_patches_(MethodReferenceComparator(),
-                      graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
-      call_patches_(MethodReferenceComparator(),
-                    graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
-      relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       boot_image_string_patches_(StringReferenceValueComparator(),
                                  graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
@@ -1538,8 +1533,17 @@
       DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
       DCHECK(source.IsDoubleStackSlot() == destination.IsDoubleStackSlot());
       UseScratchRegisterScope temps(GetVIXLAssembler());
-      // There is generally less pressure on FP registers.
-      FPRegister temp = destination.IsDoubleStackSlot() ? temps.AcquireD() : temps.AcquireS();
+      // Use any scratch register (a core or a floating-point one)
+      // from VIXL scratch register pools as a temporary.
+      //
+      // We used to only use the FP scratch register pool, but in some
+      // rare cases the only register from this pool (D31) would
+      // already be used (e.g. within a ParallelMove instruction, when
+      // a move is blocked by another move requiring a scratch FP
+      // register, which would reserve D31). To prevent this issue, we
+      // ask for a scratch register of any type (core or FP).
+      CPURegister temp =
+          temps.AcquireCPURegisterOfSize(destination.IsDoubleStackSlot() ? kXRegSize : kWRegSize);
       __ Ldr(temp, StackOperandFrom(source));
       __ Str(temp, StackOperandFrom(destination));
     }
@@ -3971,23 +3975,6 @@
 }
 
 void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
-  // For better instruction scheduling we load the direct code pointer before the method pointer.
-  bool direct_code_loaded = false;
-  switch (invoke->GetCodePtrLocation()) {
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
-      // LR = code address from literal pool with link-time patch.
-      __ Ldr(lr, DeduplicateMethodCodeLiteral(invoke->GetTargetMethod()));
-      direct_code_loaded = true;
-      break;
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
-      // LR = invoke->GetDirectCodePtr();
-      __ Ldr(lr, DeduplicateUint64Literal(invoke->GetDirectCodePtr()));
-      direct_code_loaded = true;
-      break;
-    default:
-      break;
-  }
-
   // Make sure that ArtMethod* is passed in kArtMethodRegister as per the calling convention.
   Location callee_method = temp;  // For all kinds except kRecursive, callee will be in temp.
   switch (invoke->GetMethodLoadKind()) {
@@ -4005,11 +3992,6 @@
       // Load method address from literal pool.
       __ Ldr(XRegisterFrom(temp), DeduplicateUint64Literal(invoke->GetMethodAddress()));
       break;
-    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
-      // Load method address from literal pool with a link-time patch.
-      __ Ldr(XRegisterFrom(temp),
-             DeduplicateMethodAddressLiteral(invoke->GetTargetMethod()));
-      break;
     case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
       // Add ADRP with its PC-relative DexCache access patch.
       const DexFile& dex_file = invoke->GetDexFile();
@@ -4051,23 +4033,6 @@
     case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
       __ Bl(&frame_entry_label_);
       break;
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative: {
-      relative_call_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
-                                          invoke->GetTargetMethod().dex_method_index);
-      vixl::aarch64::Label* label = &relative_call_patches_.back().label;
-      SingleEmissionCheckScope guard(GetVIXLAssembler());
-      __ Bind(label);
-      // Branch and link to itself. This will be overriden at link time.
-      __ bl(static_cast<int64_t>(0));
-      break;
-    }
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
-      // LR prepared above for better instruction scheduling.
-      DCHECK(direct_code_loaded);
-      // lr()
-      __ Blr(lr);
-      break;
     case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
       // LR = callee_method->entry_point_from_quick_compiled_code_;
       __ Ldr(lr, MemOperand(
@@ -4229,9 +4194,6 @@
 void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
   DCHECK(linker_patches->empty());
   size_t size =
-      method_patches_.size() +
-      call_patches_.size() +
-      relative_call_patches_.size() +
       pc_relative_dex_cache_patches_.size() +
       boot_image_string_patches_.size() +
       pc_relative_string_patches_.size() +
@@ -4239,24 +4201,6 @@
       pc_relative_type_patches_.size() +
       boot_image_address_patches_.size();
   linker_patches->reserve(size);
-  for (const auto& entry : method_patches_) {
-    const MethodReference& target_method = entry.first;
-    vixl::aarch64::Literal<uint64_t>* literal = entry.second;
-    linker_patches->push_back(LinkerPatch::MethodPatch(literal->GetOffset(),
-                                                       target_method.dex_file,
-                                                       target_method.dex_method_index));
-  }
-  for (const auto& entry : call_patches_) {
-    const MethodReference& target_method = entry.first;
-    vixl::aarch64::Literal<uint64_t>* literal = entry.second;
-    linker_patches->push_back(LinkerPatch::CodePatch(literal->GetOffset(),
-                                                     target_method.dex_file,
-                                                     target_method.dex_method_index));
-  }
-  for (const PatchInfo<vixl::aarch64::Label>& info : relative_call_patches_) {
-    linker_patches->push_back(
-        LinkerPatch::RelativeCodePatch(info.label.GetLocation(), &info.dex_file, info.index));
-  }
   for (const PcRelativePatchInfo& info : pc_relative_dex_cache_patches_) {
     linker_patches->push_back(LinkerPatch::DexCacheArrayPatch(info.label.GetLocation(),
                                                               &info.target_dex_file,
@@ -4314,17 +4258,6 @@
       [this]() { return __ CreateLiteralDestroyedWithPool<uint64_t>(/* placeholder */ 0u); });
 }
 
-vixl::aarch64::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateMethodAddressLiteral(
-    MethodReference target_method) {
-  return DeduplicateMethodLiteral(target_method, &method_patches_);
-}
-
-vixl::aarch64::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateMethodCodeLiteral(
-    MethodReference target_method) {
-  return DeduplicateMethodLiteral(target_method, &call_patches_);
-}
-
-
 void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
   // Explicit clinit checks triggered by static invokes must have been pruned by
   // art::PrepareForRegisterAllocation.
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 85b6f9f..8f33b6b 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -693,8 +693,6 @@
   vixl::aarch64::Literal<uint64_t>* DeduplicateUint64Literal(uint64_t value);
   vixl::aarch64::Literal<uint64_t>* DeduplicateMethodLiteral(MethodReference target_method,
                                                              MethodToLiteralMap* map);
-  vixl::aarch64::Literal<uint64_t>* DeduplicateMethodAddressLiteral(MethodReference target_method);
-  vixl::aarch64::Literal<uint64_t>* DeduplicateMethodCodeLiteral(MethodReference target_method);
 
   // The PcRelativePatchInfo is used for PC-relative addressing of dex cache arrays
   // and boot image strings/types. The only difference is the interpretation of the
@@ -737,12 +735,6 @@
   Uint32ToLiteralMap uint32_literals_;
   // Deduplication map for 64-bit literals, used for non-patchable method address or method code.
   Uint64ToLiteralMap uint64_literals_;
-  // Method patch info, map MethodReference to a literal for method address and method code.
-  MethodToLiteralMap method_patches_;
-  MethodToLiteralMap call_patches_;
-  // Relative call patch info.
-  // Using ArenaDeque<> which retains element addresses on push/emplace_back().
-  ArenaDeque<PatchInfo<vixl::aarch64::Label>> relative_call_patches_;
   // PC-relative DexCache access info.
   ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
   // Deduplication map for boot string literals for kBootImageLinkTimeAddress.
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 517bdbf..f108595 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -1245,11 +1245,6 @@
       isa_features_(isa_features),
       uint32_literals_(std::less<uint32_t>(),
                        graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
-      method_patches_(MethodReferenceComparator(),
-                      graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
-      call_patches_(MethodReferenceComparator(),
-                    graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
-      relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       boot_image_string_patches_(StringReferenceValueComparator(),
                                  graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
@@ -7233,7 +7228,7 @@
 // otherwise return a fall-back info that should be used instead.
 HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARMVIXL::GetSupportedInvokeStaticOrDirectDispatch(
     const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-    HInvokeStaticOrDirect* invoke) {
+    HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
   HInvokeStaticOrDirect::DispatchInfo dispatch_info = desired_dispatch_info;
   // We disable pc-relative load when there is an irreducible loop, as the optimization
   // is incompatible with it.
@@ -7245,24 +7240,6 @@
     dispatch_info.method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod;
   }
 
-  if (dispatch_info.code_ptr_location == HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative) {
-    const DexFile& outer_dex_file = GetGraph()->GetDexFile();
-    if (&outer_dex_file != invoke->GetTargetMethod().dex_file) {
-      // Calls across dex files are more likely to exceed the available BL range,
-      // so use absolute patch with fixup if available and kCallArtMethod otherwise.
-      HInvokeStaticOrDirect::CodePtrLocation code_ptr_location =
-          (desired_dispatch_info.method_load_kind ==
-           HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup)
-          ? HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup
-          : HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
-      return HInvokeStaticOrDirect::DispatchInfo {
-        dispatch_info.method_load_kind,
-        code_ptr_location,
-        dispatch_info.method_load_data,
-        0u
-      };
-    }
-  }
   return dispatch_info;
 }
 
@@ -7294,20 +7271,6 @@
 
 void CodeGeneratorARMVIXL::GenerateStaticOrDirectCall(
     HInvokeStaticOrDirect* invoke, Location temp) {
-  // For better instruction scheduling we load the direct code pointer before the method pointer.
-  switch (invoke->GetCodePtrLocation()) {
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
-      // LR = code address from literal pool with link-time patch.
-      __ Ldr(lr, DeduplicateMethodCodeLiteral(invoke->GetTargetMethod()));
-      break;
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
-      // LR = invoke->GetDirectCodePtr();
-      __ Mov(lr, Operand::From(invoke->GetDirectCodePtr()));
-      break;
-    default:
-      break;
-  }
-
   Location callee_method = temp;  // For all kinds except kRecursive, callee will be in temp.
   switch (invoke->GetMethodLoadKind()) {
     case HInvokeStaticOrDirect::MethodLoadKind::kStringInit: {
@@ -7323,9 +7286,6 @@
     case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
       __ Mov(RegisterFrom(temp), Operand::From(invoke->GetMethodAddress()));
       break;
-    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
-      __ Ldr(RegisterFrom(temp), DeduplicateMethodAddressLiteral(invoke->GetTargetMethod()));
-      break;
     case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
       HArmDexCacheArraysBase* base =
           invoke->InputAt(invoke->GetSpecialInputIndex())->AsArmDexCacheArraysBase();
@@ -7365,30 +7325,6 @@
     case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
       __ Bl(GetFrameEntryLabel());
       break;
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
-      relative_call_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
-                                          invoke->GetTargetMethod().dex_method_index);
-      {
-        ExactAssemblyScope aas(GetVIXLAssembler(),
-                               vixl32::kMaxInstructionSizeInBytes,
-                               CodeBufferCheckScope::kMaximumSize);
-        __ bind(&relative_call_patches_.back().label);
-        // Arbitrarily branch to the BL itself, override at link time.
-        __ bl(&relative_call_patches_.back().label);
-      }
-      break;
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
-      // LR prepared above for better instruction scheduling.
-      // LR()
-      {
-        // blx in T32 has only 16bit encoding that's why a stricter check for the scope is used.
-        ExactAssemblyScope aas(GetVIXLAssembler(),
-                               vixl32::k16BitT32InstructionSizeInBytes,
-                               CodeBufferCheckScope::kExactSize);
-        __ blx(lr);
-      }
-      break;
     case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
       // LR = callee_method->entry_point_from_quick_compiled_code_
       GetAssembler()->LoadFromOffset(
@@ -7552,9 +7488,6 @@
 void CodeGeneratorARMVIXL::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
   DCHECK(linker_patches->empty());
   size_t size =
-      method_patches_.size() +
-      call_patches_.size() +
-      relative_call_patches_.size() +
       /* MOVW+MOVT for each entry */ 2u * pc_relative_dex_cache_patches_.size() +
       boot_image_string_patches_.size() +
       /* MOVW+MOVT for each entry */ 2u * pc_relative_string_patches_.size() +
@@ -7562,29 +7495,6 @@
       /* MOVW+MOVT for each entry */ 2u * pc_relative_type_patches_.size() +
       boot_image_address_patches_.size();
   linker_patches->reserve(size);
-  for (const auto& entry : method_patches_) {
-    const MethodReference& target_method = entry.first;
-    VIXLUInt32Literal* literal = entry.second;
-    DCHECK(literal->IsBound());
-    uint32_t literal_offset = literal->GetLocation();
-    linker_patches->push_back(LinkerPatch::MethodPatch(literal_offset,
-                                                       target_method.dex_file,
-                                                       target_method.dex_method_index));
-  }
-  for (const auto& entry : call_patches_) {
-    const MethodReference& target_method = entry.first;
-    VIXLUInt32Literal* literal = entry.second;
-    DCHECK(literal->IsBound());
-    uint32_t literal_offset = literal->GetLocation();
-    linker_patches->push_back(LinkerPatch::CodePatch(literal_offset,
-                                                     target_method.dex_file,
-                                                     target_method.dex_method_index));
-  }
-  for (const PatchInfo<vixl32::Label>& info : relative_call_patches_) {
-    uint32_t literal_offset = info.label.GetLocation();
-    linker_patches->push_back(
-        LinkerPatch::RelativeCodePatch(literal_offset, &info.dex_file, info.index));
-  }
   EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
                                                                linker_patches);
   for (const auto& entry : boot_image_string_patches_) {
@@ -7643,16 +7553,6 @@
       });
 }
 
-VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateMethodAddressLiteral(
-    MethodReference target_method) {
-  return DeduplicateMethodLiteral(target_method, &method_patches_);
-}
-
-VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateMethodCodeLiteral(
-    MethodReference target_method) {
-  return DeduplicateMethodLiteral(target_method, &call_patches_);
-}
-
 void LocationsBuilderARMVIXL::VisitMultiplyAccumulate(HMultiplyAccumulate* instr) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(instr, LocationSummary::kNoCall);
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 45bd164..297d63c 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -700,8 +700,6 @@
   VIXLUInt32Literal* DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map);
   VIXLUInt32Literal* DeduplicateMethodLiteral(MethodReference target_method,
                                               MethodToLiteralMap* map);
-  VIXLUInt32Literal* DeduplicateMethodAddressLiteral(MethodReference target_method);
-  VIXLUInt32Literal* DeduplicateMethodCodeLiteral(MethodReference target_method);
   PcRelativePatchInfo* NewPcRelativePatch(const DexFile& dex_file,
                                           uint32_t offset_or_index,
                                           ArenaDeque<PcRelativePatchInfo>* patches);
@@ -724,12 +722,6 @@
 
   // Deduplication map for 32-bit literals, used for non-patchable boot image addresses.
   Uint32ToLiteralMap uint32_literals_;
-  // Method patch info, map MethodReference to a literal for method address and method code.
-  MethodToLiteralMap method_patches_;
-  MethodToLiteralMap call_patches_;
-  // Relative call patch info.
-  // Using ArenaDeque<> which retains element addresses on push/emplace_back().
-  ArenaDeque<PatchInfo<vixl::aarch32::Label>> relative_call_patches_;
   // PC-relative patch info for each HArmDexCacheArraysBase.
   ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
   // Deduplication map for boot string literals for kBootImageLinkTimeAddress.
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 897b719..01e0dac 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -458,10 +458,6 @@
       isa_features_(isa_features),
       uint32_literals_(std::less<uint32_t>(),
                        graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
-      method_patches_(MethodReferenceComparator(),
-                      graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
-      call_patches_(MethodReferenceComparator(),
-                    graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       boot_image_string_patches_(StringReferenceValueComparator(),
                                  graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
@@ -1008,8 +1004,6 @@
 void CodeGeneratorMIPS::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
   DCHECK(linker_patches->empty());
   size_t size =
-      method_patches_.size() +
-      call_patches_.size() +
       pc_relative_dex_cache_patches_.size() +
       pc_relative_string_patches_.size() +
       pc_relative_type_patches_.size() +
@@ -1017,24 +1011,6 @@
       boot_image_type_patches_.size() +
       boot_image_address_patches_.size();
   linker_patches->reserve(size);
-  for (const auto& entry : method_patches_) {
-    const MethodReference& target_method = entry.first;
-    Literal* literal = entry.second;
-    DCHECK(literal->GetLabel()->IsBound());
-    uint32_t literal_offset = __ GetLabelLocation(literal->GetLabel());
-    linker_patches->push_back(LinkerPatch::MethodPatch(literal_offset,
-                                                       target_method.dex_file,
-                                                       target_method.dex_method_index));
-  }
-  for (const auto& entry : call_patches_) {
-    const MethodReference& target_method = entry.first;
-    Literal* literal = entry.second;
-    DCHECK(literal->GetLabel()->IsBound());
-    uint32_t literal_offset = __ GetLabelLocation(literal->GetLabel());
-    linker_patches->push_back(LinkerPatch::CodePatch(literal_offset,
-                                                     target_method.dex_file,
-                                                     target_method.dex_method_index));
-  }
   EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
                                                                linker_patches);
   if (!GetCompilerOptions().IsBootImage()) {
@@ -1107,14 +1083,6 @@
       [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
 }
 
-Literal* CodeGeneratorMIPS::DeduplicateMethodAddressLiteral(MethodReference target_method) {
-  return DeduplicateMethodLiteral(target_method, &method_patches_);
-}
-
-Literal* CodeGeneratorMIPS::DeduplicateMethodCodeLiteral(MethodReference target_method) {
-  return DeduplicateMethodLiteral(target_method, &call_patches_);
-}
-
 Literal* CodeGeneratorMIPS::DeduplicateBootImageStringLiteral(const DexFile& dex_file,
                                                               dex::StringIndex string_index) {
   return boot_image_string_patches_.GetOrCreate(
@@ -1165,11 +1133,15 @@
   __ SetReorder(reordering);
 }
 
-void CodeGeneratorMIPS::MarkGCCard(Register object, Register value) {
+void CodeGeneratorMIPS::MarkGCCard(Register object,
+                                   Register value,
+                                   bool value_can_be_null) {
   MipsLabel done;
   Register card = AT;
   Register temp = TMP;
-  __ Beqz(value, &done);
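+  // Skip the card mark for a null value; the check itself is elided when the compiler
+  // knows the stored value cannot be null.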
+  if (value_can_be_null) {
+    __ Beqz(value, &done);
+  }
   __ LoadFromOffset(kLoadWord,
                     card,
                     TR,
@@ -1177,7 +1149,9 @@
   __ Srl(temp, object, gc::accounting::CardTable::kCardShift);
   __ Addu(temp, card, temp);
   __ Sb(card, temp, 0);
-  __ Bind(&done);
+  if (value_can_be_null) {
+    __ Bind(&done);
+  }
 }
 
 void CodeGeneratorMIPS::SetupBlockedRegisters() const {
@@ -2096,7 +2070,7 @@
           __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker);
           if (needs_write_barrier) {
             DCHECK_EQ(value_type, Primitive::kPrimNot);
-            codegen_->MarkGCCard(obj, value);
+            codegen_->MarkGCCard(obj, value, instruction->GetValueCanBeNull());
           }
         }
       } else {
@@ -4900,7 +4874,8 @@
 
 void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction,
                                                   const FieldInfo& field_info,
-                                                  uint32_t dex_pc) {
+                                                  uint32_t dex_pc,
+                                                  bool value_can_be_null) {
   Primitive::Type type = field_info.GetFieldType();
   LocationSummary* locations = instruction->GetLocations();
   Register obj = locations->InAt(0).AsRegister<Register>();
@@ -4995,7 +4970,7 @@
   // TODO: memory barriers?
   if (CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1))) {
     Register src = value_location.AsRegister<Register>();
-    codegen_->MarkGCCard(obj, src);
+    codegen_->MarkGCCard(obj, src, value_can_be_null);
   }
 
   if (is_volatile) {
@@ -5016,7 +4991,10 @@
 }
 
 void InstructionCodeGeneratorMIPS::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
-  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetDexPc());
+  HandleFieldSet(instruction,
+                 instruction->GetFieldInfo(),
+                 instruction->GetDexPc(),
+                 instruction->GetValueCanBeNull());
 }
 
 void InstructionCodeGeneratorMIPS::GenerateGcRootFieldLoad(
@@ -5157,22 +5135,7 @@
   // art::PrepareForRegisterAllocation.
   DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
 
-  HInvokeStaticOrDirect::MethodLoadKind method_load_kind = invoke->GetMethodLoadKind();
-  HInvokeStaticOrDirect::CodePtrLocation code_ptr_location = invoke->GetCodePtrLocation();
-  bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
-
-  // kDirectAddressWithFixup and kCallDirectWithFixup need no extra input on R6 because
-  // R6 has PC-relative addressing.
-  bool has_extra_input = !isR6 &&
-      ((method_load_kind == HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup) ||
-       (code_ptr_location == HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup));
-
-  if (invoke->HasPcRelativeDexCache()) {
-    // kDexCachePcRelative is mutually exclusive with
-    // kDirectAddressWithFixup/kCallDirectWithFixup.
-    CHECK(!has_extra_input);
-    has_extra_input = true;
-  }
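+  // Only PC-relative dex cache access still needs the extra base-register input.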
+  bool has_extra_input = invoke->HasPcRelativeDexCache();
 
   IntrinsicLocationsBuilderMIPS intrinsic(codegen_);
   if (intrinsic.TryDispatch(invoke)) {
@@ -5312,9 +5275,7 @@
   // is incompatible with it.
   bool has_irreducible_loops = GetGraph()->HasIrreducibleLoops();
   bool fallback_load = true;
-  bool fallback_call = true;
   switch (dispatch_info.method_load_kind) {
-    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
     case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
       fallback_load = has_irreducible_loops;
       break;
@@ -5322,25 +5283,10 @@
       fallback_load = false;
       break;
   }
-  switch (dispatch_info.code_ptr_location) {
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
-      fallback_call = has_irreducible_loops;
-      break;
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
-      // TODO: Implement this type.
-      break;
-    default:
-      fallback_call = false;
-      break;
-  }
   if (fallback_load) {
     dispatch_info.method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod;
     dispatch_info.method_load_data = 0;
   }
-  if (fallback_call) {
-    dispatch_info.code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
-    dispatch_info.direct_code_ptr = 0;
-  }
   return dispatch_info;
 }
 
@@ -5349,31 +5295,10 @@
   Location callee_method = temp;  // For all kinds except kRecursive, callee will be in temp.
   HInvokeStaticOrDirect::MethodLoadKind method_load_kind = invoke->GetMethodLoadKind();
   HInvokeStaticOrDirect::CodePtrLocation code_ptr_location = invoke->GetCodePtrLocation();
-  bool isR6 = isa_features_.IsR6();
-  // kDirectAddressWithFixup and kCallDirectWithFixup have no extra input on R6 because
-  // R6 has PC-relative addressing.
-  bool has_extra_input = invoke->HasPcRelativeDexCache() ||
-      (!isR6 &&
-       ((method_load_kind == HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup) ||
-        (code_ptr_location == HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup)));
-  Register base_reg = has_extra_input
+  Register base_reg = invoke->HasPcRelativeDexCache()
       ? GetInvokeStaticOrDirectExtraParameter(invoke, temp.AsRegister<Register>())
       : ZERO;
 
-  // For better instruction scheduling we load the direct code pointer before the method pointer.
-  switch (code_ptr_location) {
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
-      // T9 = invoke->GetDirectCodePtr();
-      __ LoadConst32(T9, invoke->GetDirectCodePtr());
-      break;
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
-      // T9 = code address from literal pool with link-time patch.
-      __ LoadLiteral(T9, base_reg, DeduplicateMethodCodeLiteral(invoke->GetTargetMethod()));
-      break;
-    default:
-      break;
-  }
-
   switch (method_load_kind) {
     case HInvokeStaticOrDirect::MethodLoadKind::kStringInit: {
       // temp = thread->string_init_entrypoint
@@ -5391,11 +5316,6 @@
     case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
       __ LoadConst32(temp.AsRegister<Register>(), invoke->GetMethodAddress());
       break;
-    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
-      __ LoadLiteral(temp.AsRegister<Register>(),
-                     base_reg,
-                     DeduplicateMethodAddressLiteral(invoke->GetTargetMethod()));
-      break;
     case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
       HMipsDexCacheArraysBase* base =
           invoke->InputAt(invoke->GetSpecialInputIndex())->AsMipsDexCacheArraysBase();
@@ -5438,18 +5358,6 @@
     case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
       __ Bal(&frame_entry_label_);
       break;
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
-      // T9 prepared above for better instruction scheduling.
-      // T9()
-      __ Jalr(T9);
-      __ NopIfNoReordering();
-      break;
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
-      // TODO: Implement this type.
-      // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
-      LOG(FATAL) << "Unsupported";
-      UNREACHABLE();
     case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
       // T9 = callee_method->entry_point_from_quick_compiled_code_;
       __ LoadFromOffset(kLoadWord,
@@ -6277,7 +6185,10 @@
 }
 
 void InstructionCodeGeneratorMIPS::VisitStaticFieldSet(HStaticFieldSet* instruction) {
-  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetDexPc());
+  HandleFieldSet(instruction,
+                 instruction->GetFieldInfo(),
+                 instruction->GetDexPc(),
+                 instruction->GetValueCanBeNull());
 }
 
 void LocationsBuilderMIPS::VisitUnresolvedInstanceFieldGet(
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index f03f29c..7b0812c 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -236,7 +236,10 @@
   void HandleBinaryOp(HBinaryOperation* operation);
   void HandleCondition(HCondition* instruction);
   void HandleShift(HBinaryOperation* operation);
-  void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info, uint32_t dex_pc);
+  void HandleFieldSet(HInstruction* instruction,
+                      const FieldInfo& field_info,
+                      uint32_t dex_pc,
+                      bool value_can_be_null);
   void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info, uint32_t dex_pc);
   // Generate a GC root reference load:
   //
@@ -350,7 +353,7 @@
   // Emit linker patches.
   void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
 
-  void MarkGCCard(Register object, Register value);
+  void MarkGCCard(Register object, Register value, bool value_can_be_null);
 
   // Register allocation.
 
@@ -474,8 +477,6 @@
 
   Literal* DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map);
   Literal* DeduplicateMethodLiteral(MethodReference target_method, MethodToLiteralMap* map);
-  Literal* DeduplicateMethodAddressLiteral(MethodReference target_method);
-  Literal* DeduplicateMethodCodeLiteral(MethodReference target_method);
   PcRelativePatchInfo* NewPcRelativePatch(const DexFile& dex_file,
                                           uint32_t offset_or_index,
                                           ArenaDeque<PcRelativePatchInfo>* patches);
@@ -495,9 +496,6 @@
 
   // Deduplication map for 32-bit literals, used for non-patchable boot image addresses.
   Uint32ToLiteralMap uint32_literals_;
-  // Method patch info, map MethodReference to a literal for method address and method code.
-  MethodToLiteralMap method_patches_;
-  MethodToLiteralMap call_patches_;
   // PC-relative patch info for each HMipsDexCacheArraysBase.
   ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
   // Deduplication map for boot string literals for kBootImageLinkTimeAddress.
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index aedf31e..36690c0 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -415,12 +415,7 @@
                        graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       uint64_literals_(std::less<uint64_t>(),
                        graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
-      method_patches_(MethodReferenceComparator(),
-                      graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
-      call_patches_(MethodReferenceComparator(),
-                    graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
-      relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       boot_image_string_patches_(StringReferenceValueComparator(),
                                  graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
@@ -924,44 +919,15 @@
 void CodeGeneratorMIPS64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
   DCHECK(linker_patches->empty());
   size_t size =
-      method_patches_.size() +
-      call_patches_.size() +
       pc_relative_dex_cache_patches_.size() +
-      relative_call_patches_.size() +
       pc_relative_string_patches_.size() +
       pc_relative_type_patches_.size() +
       boot_image_string_patches_.size() +
       boot_image_type_patches_.size() +
       boot_image_address_patches_.size();
   linker_patches->reserve(size);
-  for (const auto& entry : method_patches_) {
-    const MethodReference& target_method = entry.first;
-    Literal* literal = entry.second;
-    DCHECK(literal->GetLabel()->IsBound());
-    uint32_t literal_offset = __ GetLabelLocation(literal->GetLabel());
-    linker_patches->push_back(LinkerPatch::MethodPatch(literal_offset,
-                                                       target_method.dex_file,
-                                                       target_method.dex_method_index));
-  }
-  for (const auto& entry : call_patches_) {
-    const MethodReference& target_method = entry.first;
-    Literal* literal = entry.second;
-    DCHECK(literal->GetLabel()->IsBound());
-    uint32_t literal_offset = __ GetLabelLocation(literal->GetLabel());
-    linker_patches->push_back(LinkerPatch::CodePatch(literal_offset,
-                                                     target_method.dex_file,
-                                                     target_method.dex_method_index));
-  }
   EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
                                                                linker_patches);
-  for (const PcRelativePatchInfo& info : relative_call_patches_) {
-    const DexFile& dex_file = info.target_dex_file;
-    uint32_t method_index = info.offset_or_index;
-    DCHECK(info.pc_rel_label.IsBound());
-    uint32_t pc_rel_offset = __ GetLabelLocation(&info.pc_rel_label);
-    linker_patches->push_back(
-        LinkerPatch::RelativeCodePatch(pc_rel_offset, &dex_file, method_index));
-  }
   if (!GetCompilerOptions().IsBootImage()) {
     EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
                                                                   linker_patches);
@@ -1013,11 +979,6 @@
   return NewPcRelativePatch(dex_file, element_offset, &pc_relative_dex_cache_patches_);
 }
 
-CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativeCallPatch(
-    const DexFile& dex_file, uint32_t method_index) {
-  return NewPcRelativePatch(dex_file, method_index, &relative_call_patches_);
-}
-
 CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativePatch(
     const DexFile& dex_file, uint32_t offset_or_index, ArenaDeque<PcRelativePatchInfo>* patches) {
   patches->emplace_back(dex_file, offset_or_index);
@@ -1043,14 +1004,6 @@
       [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
 }
 
-Literal* CodeGeneratorMIPS64::DeduplicateMethodAddressLiteral(MethodReference target_method) {
-  return DeduplicateMethodLiteral(target_method, &method_patches_);
-}
-
-Literal* CodeGeneratorMIPS64::DeduplicateMethodCodeLiteral(MethodReference target_method) {
-  return DeduplicateMethodLiteral(target_method, &call_patches_);
-}
-
 Literal* CodeGeneratorMIPS64::DeduplicateBootImageStringLiteral(const DexFile& dex_file,
                                                                 dex::StringIndex string_index) {
   return boot_image_string_patches_.GetOrCreate(
@@ -1976,9 +1929,6 @@
 
   Primitive::Type type = instruction->InputAt(0)->GetType();
   LocationSummary* locations = instruction->GetLocations();
-  GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
-  Mips64Label true_label;
-
   switch (type) {
     default:
       // Integer case.
@@ -1987,29 +1937,11 @@
     case Primitive::kPrimLong:
       GenerateIntLongCompare(instruction->GetCondition(), /* is64bit */ true, locations);
       return;
-
     case Primitive::kPrimFloat:
     case Primitive::kPrimDouble:
-      // TODO: don't use branches.
-      GenerateFpCompareAndBranch(instruction->GetCondition(),
-                                 instruction->IsGtBias(),
-                                 type,
-                                 locations,
-                                 &true_label);
-      break;
+      GenerateFpCompare(instruction->GetCondition(), instruction->IsGtBias(), type, locations);
+      return;
   }
-
-  // Convert the branches into the result.
-  Mips64Label done;
-
-  // False case: result = 0.
-  __ LoadConst32(dst, 0);
-  __ Bc(&done);
-
-  // True case: result = 1.
-  __ Bind(&true_label);
-  __ LoadConst32(dst, 1);
-  __ Bind(&done);
 }
 
 void InstructionCodeGeneratorMIPS64::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
@@ -2424,19 +2356,40 @@
   switch (cond) {
     case kCondEQ:
     case kCondNE:
-      if (use_imm && IsUint<16>(rhs_imm)) {
-        __ Xori(dst, lhs, rhs_imm);
-      } else {
-        if (use_imm) {
-          rhs_reg = TMP;
-          __ LoadConst64(rhs_reg, rhs_imm);
+      if (use_imm && IsInt<16>(-rhs_imm)) {
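+        // -rhs_imm fits in a 16-bit immediate: compute lhs - rhs_imm with a single
+        // ADDIU/DADDIU (or test lhs directly when rhs_imm is 0) and reduce the difference
+        // to a 0/1 result, avoiding a constant load into TMP.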
+        if (rhs_imm == 0) {
+          if (cond == kCondEQ) {
+            __ Sltiu(dst, lhs, 1);
+          } else {
+            __ Sltu(dst, ZERO, lhs);
+          }
+        } else {
+          if (is64bit) {
+            __ Daddiu(dst, lhs, -rhs_imm);
+          } else {
+            __ Addiu(dst, lhs, -rhs_imm);
+          }
+          if (cond == kCondEQ) {
+            __ Sltiu(dst, dst, 1);
+          } else {
+            __ Sltu(dst, ZERO, dst);
+          }
         }
-        __ Xor(dst, lhs, rhs_reg);
-      }
-      if (cond == kCondEQ) {
-        __ Sltiu(dst, dst, 1);
       } else {
-        __ Sltu(dst, ZERO, dst);
+        if (use_imm && IsUint<16>(rhs_imm)) {
+          __ Xori(dst, lhs, rhs_imm);
+        } else {
+          if (use_imm) {
+            rhs_reg = TMP;
+            __ LoadConst64(rhs_reg, rhs_imm);
+          }
+          __ Xor(dst, lhs, rhs_reg);
+        }
+        if (cond == kCondEQ) {
+          __ Sltiu(dst, dst, 1);
+        } else {
+          __ Sltu(dst, ZERO, dst);
+        }
       }
       break;
 
@@ -2623,6 +2576,121 @@
   }
 }
 
+void InstructionCodeGeneratorMIPS64::GenerateFpCompare(IfCondition cond,
+                                                       bool gt_bias,
+                                                       Primitive::Type type,
+                                                       LocationSummary* locations) {
+  GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
+  FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
+  FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
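+  // The R6 CMP.cond.fmt instructions set FTMP to all ones when the condition holds and to
+  // all zeros otherwise; the moved value is then reduced to a 0/1 boolean in dst.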
+  if (type == Primitive::kPrimFloat) {
+    switch (cond) {
+      case kCondEQ:
+        __ CmpEqS(FTMP, lhs, rhs);
+        __ Mfc1(dst, FTMP);
+        __ Andi(dst, dst, 1);
+        break;
+      case kCondNE:
+        __ CmpEqS(FTMP, lhs, rhs);
+        __ Mfc1(dst, FTMP);
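+        // dst is -1 when the operands are equal and 0 otherwise; adding 1 yields the NE result.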
+        __ Addiu(dst, dst, 1);
+        break;
+      case kCondLT:
+        if (gt_bias) {
+          __ CmpLtS(FTMP, lhs, rhs);
+        } else {
+          __ CmpUltS(FTMP, lhs, rhs);
+        }
+        __ Mfc1(dst, FTMP);
+        __ Andi(dst, dst, 1);
+        break;
+      case kCondLE:
+        if (gt_bias) {
+          __ CmpLeS(FTMP, lhs, rhs);
+        } else {
+          __ CmpUleS(FTMP, lhs, rhs);
+        }
+        __ Mfc1(dst, FTMP);
+        __ Andi(dst, dst, 1);
+        break;
+      case kCondGT:
+        if (gt_bias) {
+          __ CmpUltS(FTMP, rhs, lhs);
+        } else {
+          __ CmpLtS(FTMP, rhs, lhs);
+        }
+        __ Mfc1(dst, FTMP);
+        __ Andi(dst, dst, 1);
+        break;
+      case kCondGE:
+        if (gt_bias) {
+          __ CmpUleS(FTMP, rhs, lhs);
+        } else {
+          __ CmpLeS(FTMP, rhs, lhs);
+        }
+        __ Mfc1(dst, FTMP);
+        __ Andi(dst, dst, 1);
+        break;
+      default:
+        LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
+        UNREACHABLE();
+    }
+  } else {
+    DCHECK_EQ(type, Primitive::kPrimDouble);
+    switch (cond) {
+      case kCondEQ:
+        __ CmpEqD(FTMP, lhs, rhs);
+        __ Mfc1(dst, FTMP);
+        __ Andi(dst, dst, 1);
+        break;
+      case kCondNE:
+        __ CmpEqD(FTMP, lhs, rhs);
+        __ Mfc1(dst, FTMP);
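+        // dst is -1 when the operands are equal and 0 otherwise; adding 1 yields the NE result.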
+        __ Addiu(dst, dst, 1);
+        break;
+      case kCondLT:
+        if (gt_bias) {
+          __ CmpLtD(FTMP, lhs, rhs);
+        } else {
+          __ CmpUltD(FTMP, lhs, rhs);
+        }
+        __ Mfc1(dst, FTMP);
+        __ Andi(dst, dst, 1);
+        break;
+      case kCondLE:
+        if (gt_bias) {
+          __ CmpLeD(FTMP, lhs, rhs);
+        } else {
+          __ CmpUleD(FTMP, lhs, rhs);
+        }
+        __ Mfc1(dst, FTMP);
+        __ Andi(dst, dst, 1);
+        break;
+      case kCondGT:
+        if (gt_bias) {
+          __ CmpUltD(FTMP, rhs, lhs);
+        } else {
+          __ CmpLtD(FTMP, rhs, lhs);
+        }
+        __ Mfc1(dst, FTMP);
+        __ Andi(dst, dst, 1);
+        break;
+      case kCondGE:
+        if (gt_bias) {
+          __ CmpUleD(FTMP, rhs, lhs);
+        } else {
+          __ CmpLeD(FTMP, rhs, lhs);
+        }
+        __ Mfc1(dst, FTMP);
+        __ Andi(dst, dst, 1);
+        break;
+      default:
+        LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
+        UNREACHABLE();
+    }
+  }
+}
+
 void InstructionCodeGeneratorMIPS64::GenerateFpCompareAndBranch(IfCondition cond,
                                                                 bool gt_bias,
                                                                 Primitive::Type type,
@@ -3276,22 +3344,6 @@
   HInvokeStaticOrDirect::MethodLoadKind method_load_kind = invoke->GetMethodLoadKind();
   HInvokeStaticOrDirect::CodePtrLocation code_ptr_location = invoke->GetCodePtrLocation();
 
-  // For better instruction scheduling we load the direct code pointer before the method pointer.
-  switch (code_ptr_location) {
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
-      // T9 = invoke->GetDirectCodePtr();
-      __ LoadLiteral(T9, kLoadDoubleword, DeduplicateUint64Literal(invoke->GetDirectCodePtr()));
-      break;
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
-      // T9 = code address from literal pool with link-time patch.
-      __ LoadLiteral(T9,
-                     kLoadUnsignedWord,
-                     DeduplicateMethodCodeLiteral(invoke->GetTargetMethod()));
-      break;
-    default:
-      break;
-  }
-
   switch (method_load_kind) {
     case HInvokeStaticOrDirect::MethodLoadKind::kStringInit: {
       // temp = thread->string_init_entrypoint
@@ -3311,11 +3363,6 @@
                      kLoadDoubleword,
                      DeduplicateUint64Literal(invoke->GetMethodAddress()));
       break;
-    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
-      __ LoadLiteral(temp.AsRegister<GpuRegister>(),
-                     kLoadUnsignedWord,
-                     DeduplicateMethodAddressLiteral(invoke->GetTargetMethod()));
-      break;
     case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
       uint32_t offset = invoke->GetDexCacheArrayOffset();
       CodeGeneratorMIPS64::PcRelativePatchInfo* info =
@@ -3358,21 +3405,6 @@
     case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
       __ Balc(&frame_entry_label_);
       break;
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
-      // T9 prepared above for better instruction scheduling.
-      // T9()
-      __ Jalr(T9);
-      __ Nop();
-      break;
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative: {
-      CodeGeneratorMIPS64::PcRelativePatchInfo* info =
-          NewPcRelativeCallPatch(*invoke->GetTargetMethod().dex_file,
-                                 invoke->GetTargetMethod().dex_method_index);
-      EmitPcRelativeAddressPlaceholderHigh(info, AT);
-      __ Jialc(AT, /* placeholder */ 0x5678);
-      break;
-    }
     case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
       // T9 = callee_method->entry_point_from_quick_compiled_code_;
       __ LoadFromOffset(kLoadDoubleword,
@@ -4243,9 +4275,12 @@
         break;
       case Primitive::kPrimInt:
       case Primitive::kPrimLong:
-        // Sign-extend 32-bit int into bits 32 through 63 for
-        // int-to-long and long-to-int conversions
-        __ Sll(dst, src, 0);
+        // Sign-extend 32-bit int into bits 32 through 63 for int-to-long and long-to-int
+        // conversions, except when the input and output registers are the same and we are not
+        // converting longs to shorter types. In these cases, do nothing.
+        if ((input_type == Primitive::kPrimLong) || (dst != src)) {
+          __ Sll(dst, src, 0);
+        }
         break;
 
       default:
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 99f11a9..8ac919f 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -258,6 +258,10 @@
                                        bool is64bit,
                                        LocationSummary* locations,
                                        Mips64Label* label);
+  void GenerateFpCompare(IfCondition cond,
+                         bool gt_bias,
+                         Primitive::Type type,
+                         LocationSummary* locations);
   void GenerateFpCompareAndBranch(IfCondition cond,
                                   bool gt_bias,
                                   Primitive::Type type,
@@ -338,6 +342,10 @@
     block_labels_ = CommonInitializeLabels<Mips64Label>();
   }
 
+  // We prefer aligned loads and stores (less code), so spill and restore registers in slow paths
+  // at aligned locations.
+  uint32_t GetPreferredSlotsAlignment() const OVERRIDE { return kMips64DoublewordSize; }
+
   void Finalize(CodeAllocator* allocator) OVERRIDE;
 
   // Code generation helpers.
@@ -430,8 +438,6 @@
   Literal* DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map);
   Literal* DeduplicateUint64Literal(uint64_t value);
   Literal* DeduplicateMethodLiteral(MethodReference target_method, MethodToLiteralMap* map);
-  Literal* DeduplicateMethodAddressLiteral(MethodReference target_method);
-  Literal* DeduplicateMethodCodeLiteral(MethodReference target_method);
 
   PcRelativePatchInfo* NewPcRelativePatch(const DexFile& dex_file,
                                           uint32_t offset_or_index,
@@ -455,12 +461,8 @@
   // Deduplication map for 64-bit literals, used for non-patchable method address or method code
   // address.
   Uint64ToLiteralMap uint64_literals_;
-  // Method patch info, map MethodReference to a literal for method address and method code.
-  MethodToLiteralMap method_patches_;
-  MethodToLiteralMap call_patches_;
   // PC-relative patch info.
   ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
-  ArenaDeque<PcRelativePatchInfo> relative_call_patches_;
   // Deduplication map for boot string literals for kBootImageLinkTimeAddress.
   BootStringToLiteralMap boot_image_string_patches_;
   // PC-relative String patch info; type depends on configuration (app .bss or boot image PIC).
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 8612a67..0abe855 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1006,8 +1006,6 @@
       move_resolver_(graph->GetArena(), this),
       assembler_(graph->GetArena()),
       isa_features_(isa_features),
-      method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
-      relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       simple_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
@@ -4454,20 +4452,7 @@
           HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative)) {
     dispatch_info.method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod;
   }
-  switch (dispatch_info.code_ptr_location) {
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
-      // For direct code, we actually prefer to call via the code pointer from ArtMethod*.
-      // (Though the direct CALL ptr16:32 is available for consideration).
-      return HInvokeStaticOrDirect::DispatchInfo {
-        dispatch_info.method_load_kind,
-        HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
-        dispatch_info.method_load_data,
-        0u
-      };
-    default:
-      return dispatch_info;
-  }
+  return dispatch_info;
 }
 
 Register CodeGeneratorX86::GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke,
@@ -4514,12 +4499,6 @@
     case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
       __ movl(temp.AsRegister<Register>(), Immediate(invoke->GetMethodAddress()));
       break;
-    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
-      __ movl(temp.AsRegister<Register>(), Immediate(/* placeholder */ 0));
-      method_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
-                                   invoke->GetTargetMethod().dex_method_index);
-      __ Bind(&method_patches_.back().label);  // Bind the label at the end of the "movl" insn.
-      break;
     case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
       Register base_reg = GetInvokeStaticOrDirectExtraParameter(invoke,
                                                                 temp.AsRegister<Register>());
@@ -4561,19 +4540,6 @@
     case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
       __ call(GetFrameEntryLabel());
       break;
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative: {
-      relative_call_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
-                                          invoke->GetTargetMethod().dex_method_index);
-      Label* label = &relative_call_patches_.back().label;
-      __ call(label);  // Bind to the patch label, override at link time.
-      __ Bind(label);  // Bind the label at the end of the "call" insn.
-      break;
-    }
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
-      // Filtered out by GetSupportedInvokeStaticOrDirectDispatch().
-      LOG(FATAL) << "Unsupported";
-      UNREACHABLE();
     case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
       // (callee_method + offset_of_quick_compiled_code)()
       __ call(Address(callee_method.AsRegister<Register>(),
@@ -4664,22 +4630,11 @@
 void CodeGeneratorX86::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
   DCHECK(linker_patches->empty());
   size_t size =
-      method_patches_.size() +
-      relative_call_patches_.size() +
       pc_relative_dex_cache_patches_.size() +
       simple_patches_.size() +
       string_patches_.size() +
       type_patches_.size();
   linker_patches->reserve(size);
-  for (const PatchInfo<Label>& info : method_patches_) {
-    uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
-    linker_patches->push_back(LinkerPatch::MethodPatch(literal_offset, &info.dex_file, info.index));
-  }
-  for (const PatchInfo<Label>& info : relative_call_patches_) {
-    uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
-    linker_patches->push_back(
-        LinkerPatch::RelativeCodePatch(literal_offset, &info.dex_file, info.index));
-  }
   EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
                                                                linker_patches);
   for (const Label& label : simple_patches_) {
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index c44da97..1af6850 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -613,9 +613,6 @@
   X86Assembler assembler_;
   const X86InstructionSetFeatures& isa_features_;
 
-  // Method patch info. Using ArenaDeque<> which retains element addresses on push/emplace_back().
-  ArenaDeque<PatchInfo<Label>> method_patches_;
-  ArenaDeque<PatchInfo<Label>> relative_call_patches_;
   // PC-relative DexCache access info.
   ArenaDeque<PatchInfo<Label>> pc_relative_dex_cache_patches_;
   // Patch locations for patchoat where the linker doesn't do any other work.
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 7dfc736..903844f 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -960,19 +960,7 @@
 HInvokeStaticOrDirect::DispatchInfo CodeGeneratorX86_64::GetSupportedInvokeStaticOrDirectDispatch(
       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
       HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
-  switch (desired_dispatch_info.code_ptr_location) {
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
-      // For direct code, we actually prefer to call via the code pointer from ArtMethod*.
-      return HInvokeStaticOrDirect::DispatchInfo {
-        desired_dispatch_info.method_load_kind,
-        HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
-        desired_dispatch_info.method_load_data,
-        0u
-      };
-    default:
-      return desired_dispatch_info;
-  }
+  return desired_dispatch_info;
 }
 
 Location CodeGeneratorX86_64::GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
@@ -993,12 +981,6 @@
     case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
       __ movq(temp.AsRegister<CpuRegister>(), Immediate(invoke->GetMethodAddress()));
       break;
-    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
-      __ movl(temp.AsRegister<CpuRegister>(), Immediate(0));  // Placeholder.
-      method_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
-                                   invoke->GetTargetMethod().dex_method_index);
-      __ Bind(&method_patches_.back().label);  // Bind the label at the end of the "movl" insn.
-      break;
     case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
       __ movq(temp.AsRegister<CpuRegister>(),
               Address::Absolute(kDummy32BitOffset, /* no_rip */ false));
@@ -1042,19 +1024,6 @@
     case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
       __ call(&frame_entry_label_);
       break;
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative: {
-      relative_call_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
-                                          invoke->GetTargetMethod().dex_method_index);
-      Label* label = &relative_call_patches_.back().label;
-      __ call(label);  // Bind to the patch label, override at link time.
-      __ Bind(label);  // Bind the label at the end of the "call" insn.
-      break;
-    }
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
-      // Filtered out by GetSupportedInvokeStaticOrDirectDispatch().
-      LOG(FATAL) << "Unsupported";
-      UNREACHABLE();
     case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
       // (callee_method + offset_of_quick_compiled_code)()
       __ call(Address(callee_method.AsRegister<CpuRegister>(),
@@ -1146,22 +1115,11 @@
 void CodeGeneratorX86_64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
   DCHECK(linker_patches->empty());
   size_t size =
-      method_patches_.size() +
-      relative_call_patches_.size() +
       pc_relative_dex_cache_patches_.size() +
       simple_patches_.size() +
       string_patches_.size() +
       type_patches_.size();
   linker_patches->reserve(size);
-  for (const PatchInfo<Label>& info : method_patches_) {
-    uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
-    linker_patches->push_back(LinkerPatch::MethodPatch(literal_offset, &info.dex_file, info.index));
-  }
-  for (const PatchInfo<Label>& info : relative_call_patches_) {
-    uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
-    linker_patches->push_back(
-        LinkerPatch::RelativeCodePatch(literal_offset, &info.dex_file, info.index));
-  }
   EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
                                                                linker_patches);
   for (const Label& label : simple_patches_) {
@@ -1253,8 +1211,6 @@
         assembler_(graph->GetArena()),
         isa_features_(isa_features),
         constant_area_start_(0),
-        method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
-        relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
         pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
         simple_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
         string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 391a23b..f827e79 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -596,9 +596,6 @@
   // Used for fixups to the constant area.
   int constant_area_start_;
 
-  // Method patch info. Using ArenaDeque<> which retains element addresses on push/emplace_back().
-  ArenaDeque<PatchInfo<Label>> method_patches_;
-  ArenaDeque<PatchInfo<Label>> relative_call_patches_;
   // PC-relative DexCache access info.
   ArenaDeque<PatchInfo<Label>> pc_relative_dex_cache_patches_;
   // Patch locations for patchoat where the linker doesn't do any other work.
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index 5456b1e..88473f02 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -431,16 +431,17 @@
     if (IsNarrowingLinear(a) || IsNarrowingLinear(b)) {
       return nullptr;  // no transfer
     } else if (a->induction_class == kInvariant && b->induction_class == kInvariant) {
-      return CreateInvariantOp(op, a, b);
+      return CreateInvariantOp(op, a, b);  // direct invariant
     } else if ((a->induction_class == kLinear && b->induction_class == kLinear) ||
                (a->induction_class == kPolynomial && b->induction_class == kPolynomial)) {
-      return CreateInduction(a->induction_class,
-                             a->operation,
-                             TransferAddSub(a->op_a, b->op_a, op),
-                             TransferAddSub(a->op_b, b->op_b, op),
-                             /*fetch*/ nullptr,
-                             type_);
+      // Rule induc(a, b) + induc(a', b') -> induc(a + a', b + b').
+      InductionInfo* new_a = TransferAddSub(a->op_a, b->op_a, op);
+      InductionInfo* new_b = TransferAddSub(a->op_b, b->op_b, op);
+      if (new_a != nullptr && new_b != nullptr) {
+        return CreateInduction(a->induction_class, a->operation, new_a, new_b, a->fetch, type_);
+      }
     } else if (a->induction_class == kInvariant) {
+      // Rule a + induc(a', b') -> induc(a', a + b') or induc(a + a', a + b').
       InductionInfo* new_a = b->op_a;
       InductionInfo* new_b = TransferAddSub(a, b->op_b, op);
       if (b->induction_class == kWrapAround || b->induction_class == kPeriodic) {
@@ -448,14 +449,19 @@
       } else if (op == kSub) {  // Negation required.
         new_a = TransferNeg(new_a);
       }
-      return CreateInduction(b->induction_class, b->operation, new_a, new_b, b->fetch, type_);
+      if (new_a != nullptr && new_b != nullptr) {
+        return CreateInduction(b->induction_class, b->operation, new_a, new_b, b->fetch, type_);
+      }
     } else if (b->induction_class == kInvariant) {
+      // Rule induc(a, b) + b' -> induc(a, b + b') or induc(a + b', b + b').
       InductionInfo* new_a = a->op_a;
       InductionInfo* new_b = TransferAddSub(a->op_b, b, op);
       if (a->induction_class == kWrapAround || a->induction_class == kPeriodic) {
         new_a = TransferAddSub(new_a, b, op);
       }
-      return CreateInduction(a->induction_class, a->operation, new_a, new_b, a->fetch, type_);
+      if (new_a != nullptr && new_b != nullptr) {
+        return CreateInduction(a->induction_class, a->operation, new_a, new_b, a->fetch, type_);
+      }
     }
   }
   return nullptr;
@@ -468,14 +474,14 @@
     if (IsNarrowingLinear(a)) {
       return nullptr;  // no transfer
     } else if (a->induction_class == kInvariant) {
-      return CreateInvariantOp(kNeg, nullptr, a);
+      return CreateInvariantOp(kNeg, nullptr, a);  // direct invariant
     } else if (a->induction_class != kGeometric || a->operation == kMul) {
-      return CreateInduction(a->induction_class,
-                             a->operation,
-                             TransferNeg(a->op_a),
-                             TransferNeg(a->op_b),
-                             a->fetch,
-                             type_);
+      // Rule - induc(a, b) -> induc(-a, -b).
+      InductionInfo* new_a = TransferNeg(a->op_a);
+      InductionInfo* new_b = TransferNeg(a->op_b);
+      if (new_a != nullptr && new_b != nullptr) {
+        return CreateInduction(a->induction_class, a->operation, new_a, new_b, a->fetch, type_);
+      }
     }
   }
   return nullptr;
@@ -490,23 +496,23 @@
     if (IsNarrowingLinear(a) || IsNarrowingLinear(b)) {
       return nullptr;  // no transfer
     } else if (a->induction_class == kInvariant && b->induction_class == kInvariant) {
-      return CreateInvariantOp(kMul, a, b);
+      return CreateInvariantOp(kMul, a, b);  // direct invariant
     } else if (a->induction_class == kInvariant && (b->induction_class != kGeometric ||
                                                     b->operation == kMul)) {
-      return CreateInduction(b->induction_class,
-                             b->operation,
-                             TransferMul(a, b->op_a),
-                             TransferMul(a, b->op_b),
-                             b->fetch,
-                             type_);
+      // Rule a * induc(a', b') -> induc(a * a', b * b').
+      InductionInfo* new_a = TransferMul(a, b->op_a);
+      InductionInfo* new_b = TransferMul(a, b->op_b);
+      if (new_a != nullptr && new_b != nullptr) {
+        return CreateInduction(b->induction_class, b->operation, new_a, new_b, b->fetch, type_);
+      }
     } else if (b->induction_class == kInvariant && (a->induction_class != kGeometric ||
                                                     a->operation == kMul)) {
-      return CreateInduction(a->induction_class,
-                             a->operation,
-                             TransferMul(a->op_a, b),
-                             TransferMul(a->op_b, b),
-                             a->fetch,
-                             type_);
+      // Rule induc(a, b) * b' -> induc(a * b', b * b').
+      InductionInfo* new_a = TransferMul(a->op_a, b);
+      InductionInfo* new_b = TransferMul(a->op_b, b);
+      if (new_a != nullptr && new_b != nullptr) {
+        return CreateInduction(a->induction_class, a->operation, new_a, new_b, a->fetch, type_);
+      }
     }
   }
   return nullptr;
@@ -522,7 +528,7 @@
     if (IsNarrowingIntegralConversion(from, to) &&
         a->induction_class == kLinear &&
         (a->type == to || IsNarrowingIntegralConversion(a->type, to))) {
-      return CreateInduction(kLinear, kNop, a->op_a, a->op_b, /*fetch*/ nullptr, to);
+      return CreateInduction(kLinear, kNop, a->op_a, a->op_b, a->fetch, to);
     }
   }
   return nullptr;
@@ -600,17 +606,15 @@
           return CreateInvariantOp(op, a, b);
         }
       }
-    } else if (b->induction_class == kLinear) {
+    } else if (b->induction_class == kLinear && b->type == type_) {
       // Solve within a tight cycle that adds a term that is already classified as a linear
       // induction for a polynomial induction k = k + i (represented as sum over linear terms).
       if (x == entry_phi && entry_phi->InputCount() == 2 && instruction == entry_phi->InputAt(1)) {
         InductionInfo* initial = LookupInfo(loop, entry_phi->InputAt(0));
-        return CreateInduction(kPolynomial,
-                               kNop,
-                               op == kAdd ? b : TransferNeg(b),
-                               initial,
-                               /*fetch*/ nullptr,
-                               type_);
+        InductionInfo* new_a = op == kAdd ? b : TransferNeg(b);
+        if (new_a != nullptr) {
+          return CreateInduction(kPolynomial, kNop, new_a, initial, /*fetch*/ nullptr, type_);
+        }
       }
     }
   }
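The transfer hunks above all apply the same guard: TransferAddSub/TransferNeg/TransferMul may now return nullptr (for example on narrowing linear inductions), so CreateInduction() is only reached once both recursively transferred operands are known to be non-null. A minimal, self-contained sketch of that pattern, using illustrative names (Info, Transfer, TransferPair) rather than the real InductionVarAnalysis types:

#include <cstdio>

// Illustrative stand-in for InductionInfo; not the ART class.
struct Info {
  Info* op_a;
  Info* op_b;
  bool transferable;
};

// Hypothetical transfer: fails (returns nullptr) on untransferable operands.
static Info* Transfer(Info* info) {
  return (info != nullptr && info->transferable) ? info : nullptr;
}

// Mirrors the guard added above: build a new node only if both operands transferred.
static Info* TransferPair(Info* a) {
  Info* new_a = Transfer(a->op_a);
  Info* new_b = Transfer(a->op_b);
  if (new_a != nullptr && new_b != nullptr) {
    return new Info{new_a, new_b, true};  // toy allocation; leaks in this sketch
  }
  return nullptr;  // no transfer: one of the operands failed
}

int main() {
  Info ok{nullptr, nullptr, true};
  Info bad{nullptr, nullptr, false};
  Info both_ok{&ok, &ok, true};
  Info one_bad{&ok, &bad, true};
  std::printf("%d %d\n", TransferPair(&both_ok) != nullptr, TransferPair(&one_bad) != nullptr);
  return 0;
}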
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index d847879..3b83e95 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -344,6 +344,7 @@
   if (actual_method != nullptr) {
     bool result = TryInlineAndReplace(invoke_instruction,
                                       actual_method,
+                                      ReferenceTypeInfo::CreateInvalid(),
                                       /* do_rtp */ true,
                                       cha_devirtualize);
     if (result && !invoke_instruction->IsInvokeStaticOrDirect()) {
@@ -471,9 +472,10 @@
   HInstruction* receiver = invoke_instruction->InputAt(0);
   HInstruction* cursor = invoke_instruction->GetPrevious();
   HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
-
+  Handle<mirror::Class> handle = handles_->NewHandle(GetMonomorphicType(classes));
   if (!TryInlineAndReplace(invoke_instruction,
                            resolved_method,
+                           ReferenceTypeInfo::Create(handle, /* is_exact */ true),
                            /* do_rtp */ false,
                            /* cha_devirtualize */ false)) {
     return false;
@@ -591,13 +593,13 @@
       break;
     }
     ArtMethod* method = nullptr;
+
+    Handle<mirror::Class> handle = handles_->NewHandle(classes->Get(i));
     if (invoke_instruction->IsInvokeInterface()) {
-      method = classes->Get(i)->FindVirtualMethodForInterface(
-          resolved_method, pointer_size);
+      method = handle->FindVirtualMethodForInterface(resolved_method, pointer_size);
     } else {
       DCHECK(invoke_instruction->IsInvokeVirtual());
-      method = classes->Get(i)->FindVirtualMethodForVirtual(
-          resolved_method, pointer_size);
+      method = handle->FindVirtualMethodForVirtual(resolved_method, pointer_size);
     }
 
     HInstruction* receiver = invoke_instruction->InputAt(0);
@@ -605,10 +607,13 @@
     HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
 
     dex::TypeIndex class_index = FindClassIndexIn(
-        classes->Get(i), caller_dex_file, caller_compilation_unit_.GetDexCache());
+        handle.Get(), caller_dex_file, caller_compilation_unit_.GetDexCache());
     HInstruction* return_replacement = nullptr;
     if (!class_index.IsValid() ||
-        !TryBuildAndInline(invoke_instruction, method, &return_replacement)) {
+        !TryBuildAndInline(invoke_instruction,
+                           method,
+                           ReferenceTypeInfo::Create(handle, /* is_exact */ true),
+                           &return_replacement)) {
       all_targets_inlined = false;
     } else {
       one_target_inlined = true;
@@ -627,7 +632,7 @@
                                            cursor,
                                            bb_cursor,
                                            class_index,
-                                           classes->Get(i),
+                                           handle.Get(),
                                            invoke_instruction,
                                            deoptimize);
       if (deoptimize) {
@@ -792,7 +797,10 @@
   HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
 
   HInstruction* return_replacement = nullptr;
-  if (!TryBuildAndInline(invoke_instruction, actual_method, &return_replacement)) {
+  if (!TryBuildAndInline(invoke_instruction,
+                         actual_method,
+                         ReferenceTypeInfo::CreateInvalid(),
+                         &return_replacement)) {
     return false;
   }
 
@@ -857,13 +865,14 @@
 
 bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction,
                                    ArtMethod* method,
+                                   ReferenceTypeInfo receiver_type,
                                    bool do_rtp,
                                    bool cha_devirtualize) {
   HInstruction* return_replacement = nullptr;
   uint32_t dex_pc = invoke_instruction->GetDexPc();
   HInstruction* cursor = invoke_instruction->GetPrevious();
   HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
-  if (!TryBuildAndInline(invoke_instruction, method, &return_replacement)) {
+  if (!TryBuildAndInline(invoke_instruction, method, receiver_type, &return_replacement)) {
     if (invoke_instruction->IsInvokeInterface()) {
       // Turn an invoke-interface into an invoke-virtual. An invoke-virtual is always
       // better than an invoke-interface because:
@@ -921,6 +930,7 @@
 
 bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
                                  ArtMethod* method,
+                                 ReferenceTypeInfo receiver_type,
                                  HInstruction** return_replacement) {
   if (method->IsProxyMethod()) {
     VLOG(compiler) << "Method " << method->PrettyMethod()
@@ -997,7 +1007,8 @@
     return false;
   }
 
-  if (!TryBuildAndInlineHelper(invoke_instruction, method, same_dex_file, return_replacement)) {
+  if (!TryBuildAndInlineHelper(
+          invoke_instruction, method, receiver_type, same_dex_file, return_replacement)) {
     return false;
   }
 
@@ -1194,8 +1205,10 @@
 
 bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
                                        ArtMethod* resolved_method,
+                                       ReferenceTypeInfo receiver_type,
                                        bool same_dex_file,
                                        HInstruction** return_replacement) {
+  DCHECK(!(resolved_method->IsStatic() && receiver_type.IsValid()));
   ScopedObjectAccess soa(Thread::Current());
   const DexFile::CodeItem* code_item = resolved_method->GetCodeItem();
   const DexFile& callee_dex_file = *resolved_method->GetDexFile();
@@ -1286,12 +1299,13 @@
   }
 
   size_t parameter_index = 0;
+  bool run_rtp = false;
   for (HInstructionIterator instructions(callee_graph->GetEntryBlock()->GetInstructions());
        !instructions.Done();
        instructions.Advance()) {
     HInstruction* current = instructions.Current();
     if (current->IsParameterValue()) {
-      HInstruction* argument = invoke_instruction->InputAt(parameter_index++);
+      HInstruction* argument = invoke_instruction->InputAt(parameter_index);
       if (argument->IsNullConstant()) {
         current->ReplaceWith(callee_graph->GetNullConstant());
       } else if (argument->IsIntConstant()) {
@@ -1305,15 +1319,21 @@
         current->ReplaceWith(
             callee_graph->GetDoubleConstant(argument->AsDoubleConstant()->GetValue()));
       } else if (argument->GetType() == Primitive::kPrimNot) {
-        current->SetReferenceTypeInfo(argument->GetReferenceTypeInfo());
+        if (!resolved_method->IsStatic() && parameter_index == 0 && receiver_type.IsValid()) {
+          run_rtp = true;
+          current->SetReferenceTypeInfo(receiver_type);
+        } else {
+          current->SetReferenceTypeInfo(argument->GetReferenceTypeInfo());
+        }
         current->AsParameterValue()->SetCanBeNull(argument->CanBeNull());
       }
+      ++parameter_index;
     }
   }
 
   // We have replaced formal arguments with actual arguments. If actual types
   // are more specific than the declared ones, run RTP again on the inner graph.
-  if (ArgumentTypesMoreSpecific(invoke_instruction, resolved_method)) {
+  if (run_rtp || ArgumentTypesMoreSpecific(invoke_instruction, resolved_method)) {
     ReferenceTypePropagation(callee_graph,
                              dex_compilation_unit.GetDexCache(),
                              handles_,
@@ -1502,7 +1522,7 @@
 
   ReferenceTypeInfo actual_rti = actual_obj->GetReferenceTypeInfo();
   return (actual_rti.IsExact() && !declared_rti.IsExact()) ||
-         declared_rti.IsStrictSupertypeOf(actual_rti);
+          declared_rti.IsStrictSupertypeOf(actual_rti);
 }
 
 ReferenceTypeInfo HInliner::GetClassRTI(mirror::Class* klass) {
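In effect, the new receiver_type argument lets the inliner seed the callee's `this` parameter with the exact class observed at a monomorphic or per-target polymorphic call site, and then request another reference-type-propagation pass. A simplified sketch of the per-parameter choice made in TryBuildAndInlineHelper; RTI and Param are hypothetical stand-ins for ReferenceTypeInfo and HParameterValue, and primitive parameters are ignored here:

// Sketch only, not the ART API.
struct RTI { bool valid; bool exact; };
struct Param { RTI rti; };

// Parameter 0 of an instance method takes the call-site receiver type when one
// is available; all other parameters copy the actual argument's type info.
static bool SeedParameterTypes(Param* params, const RTI* arg_rtis, int count,
                               bool is_static, RTI receiver_type) {
  bool run_rtp = false;
  for (int i = 0; i < count; ++i) {
    if (!is_static && i == 0 && receiver_type.valid) {
      params[i].rti = receiver_type;  // more precise than the declared type
      run_rtp = true;                 // request another type-propagation pass
    } else {
      params[i].rti = arg_rtis[i];
    }
  }
  return run_rtp;
}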
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 0c64362..4c0b990 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -66,17 +66,20 @@
   // a CHA guard needs to be added for the inlining.
   bool TryInlineAndReplace(HInvoke* invoke_instruction,
                            ArtMethod* resolved_method,
+                           ReferenceTypeInfo receiver_type,
                            bool do_rtp,
                            bool cha_devirtualize)
     REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool TryBuildAndInline(HInvoke* invoke_instruction,
                          ArtMethod* resolved_method,
+                         ReferenceTypeInfo receiver_type,
                          HInstruction** return_replacement)
     REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool TryBuildAndInlineHelper(HInvoke* invoke_instruction,
                                ArtMethod* resolved_method,
+                               ReferenceTypeInfo receiver_type,
                                bool same_dex_file,
                                HInstruction** return_replacement);
 
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 1ca3218..af8e2c8 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -816,8 +816,7 @@
     HInvokeStaticOrDirect::DispatchInfo dispatch_info = {
         HInvokeStaticOrDirect::MethodLoadKind::kStringInit,
         HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
-        dchecked_integral_cast<uint64_t>(string_init_entry_point),
-        0U
+        dchecked_integral_cast<uint64_t>(string_init_entry_point)
     };
     MethodReference target_method(dex_file_, method_idx);
     HInvoke* invoke = new (arena_) HInvokeStaticOrDirect(
@@ -862,8 +861,7 @@
     HInvokeStaticOrDirect::DispatchInfo dispatch_info = {
         HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
         HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
-        0u,
-        0U
+        0u
     };
     MethodReference target_method(resolved_method->GetDexFile(),
                                   resolved_method->GetDexMethodIndex());
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 9b5d7a0..e9c6615 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -1648,7 +1648,8 @@
   }
 
   if (type == Primitive::kPrimNot) {
-    codegen->MarkGCCard(base, locations->InAt(3).AsRegister<Register>());
+    bool value_can_be_null = true;  // TODO: Worth finding out this information?
+    codegen->MarkGCCard(base, locations->InAt(3).AsRegister<Register>(), value_can_be_null);
   }
 }
 
@@ -1806,7 +1807,8 @@
 
   if (type == Primitive::kPrimNot) {
     // Mark card for object assuming new value is stored.
-    codegen->MarkGCCard(base, value);
+    bool value_can_be_null = true;  // TODO: Worth finding out this information?
+    codegen->MarkGCCard(base, value, value_can_be_null);
   }
 
   // do {
@@ -2464,6 +2466,94 @@
   __ Bind(&done);
 }
 
+// void java.lang.String.getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin)
+void IntrinsicLocationsBuilderMIPS::VisitStringGetCharsNoCheck(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kCallOnMainOnly,
+                                                            kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetInAt(2, Location::RequiresRegister());
+  locations->SetInAt(3, Location::RequiresRegister());
+  locations->SetInAt(4, Location::RequiresRegister());
+
+  // We will call memcpy() to do the actual work. Allocate the temporary
+  // registers to use the correct input registers, and output register.
+  // memcpy() uses the normal MIPS calling convention.
+  InvokeRuntimeCallingConvention calling_convention;
+
+  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+
+  Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+  locations->AddTemp(Location::RegisterLocation(outLocation.AsRegister<Register>()));
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitStringGetCharsNoCheck(HInvoke* invoke) {
+  MipsAssembler* assembler = GetAssembler();
+  LocationSummary* locations = invoke->GetLocations();
+
+  // Check assumption that sizeof(Char) is 2 (used in scaling below).
+  const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar);
+  DCHECK_EQ(char_size, 2u);
+  const size_t char_shift = Primitive::ComponentSizeShift(Primitive::kPrimChar);
+
+  Register srcObj = locations->InAt(0).AsRegister<Register>();
+  Register srcBegin = locations->InAt(1).AsRegister<Register>();
+  Register srcEnd = locations->InAt(2).AsRegister<Register>();
+  Register dstObj = locations->InAt(3).AsRegister<Register>();
+  Register dstBegin = locations->InAt(4).AsRegister<Register>();
+
+  Register dstPtr = locations->GetTemp(0).AsRegister<Register>();
+  DCHECK_EQ(dstPtr, A0);
+  Register srcPtr = locations->GetTemp(1).AsRegister<Register>();
+  DCHECK_EQ(srcPtr, A1);
+  Register numChrs = locations->GetTemp(2).AsRegister<Register>();
+  DCHECK_EQ(numChrs, A2);
+
+  Register dstReturn = locations->GetTemp(3).AsRegister<Register>();
+  DCHECK_EQ(dstReturn, V0);
+
+  MipsLabel done;
+
+  // Location of data in char array buffer.
+  const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value();
+
+  // Get offset of value field within a string object.
+  const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
+
+  __ Beq(srcEnd, srcBegin, &done);  // No characters to move.
+
+  // Calculate number of characters to be copied.
+  __ Subu(numChrs, srcEnd, srcBegin);
+
+  // Calculate destination address.
+  __ Addiu(dstPtr, dstObj, data_offset);
+  if (IsR6()) {
+    __ Lsa(dstPtr, dstBegin, dstPtr, char_shift);
+  } else {
+    __ Sll(AT, dstBegin, char_shift);
+    __ Addu(dstPtr, dstPtr, AT);
+  }
+
+  // Calculate source address.
+  __ Addiu(srcPtr, srcObj, value_offset);
+  if (IsR6()) {
+    __ Lsa(srcPtr, srcBegin, srcPtr, char_shift);
+  } else {
+    __ Sll(AT, srcBegin, char_shift);
+    __ Addu(srcPtr, srcPtr, AT);
+  }
+
+  // Calculate number of bytes to copy from number of characters.
+  __ Sll(numChrs, numChrs, char_shift);
+
+  codegen_->InvokeRuntime(kQuickMemcpy, invoke, invoke->GetDexPc(), nullptr);
+
+  __ Bind(&done);
+}
+
 // Unimplemented intrinsics.
 
 UNIMPLEMENTED_INTRINSIC(MIPS, MathCeil)
@@ -2473,7 +2563,6 @@
 UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeCASLong)
 
 UNIMPLEMENTED_INTRINSIC(MIPS, ReferenceGetReferent)
-UNIMPLEMENTED_INTRINSIC(MIPS, StringGetCharsNoCheck)
 UNIMPLEMENTED_INTRINSIC(MIPS, SystemArrayCopyChar)
 UNIMPLEMENTED_INTRINSIC(MIPS, SystemArrayCopy)
 
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 5a99886..3022e97 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1846,6 +1846,84 @@
   GenIsInfinite(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
 }
 
+// void java.lang.String.getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin)
+void IntrinsicLocationsBuilderMIPS64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kCallOnMainOnly,
+                                                            kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetInAt(2, Location::RequiresRegister());
+  locations->SetInAt(3, Location::RequiresRegister());
+  locations->SetInAt(4, Location::RequiresRegister());
+
+  // We will call memcpy() to do the actual work. Allocate the temporary
+  // registers to use the correct input registers, and output register.
+  // memcpy() uses the normal MIPS calling conventions.
+  InvokeRuntimeCallingConvention calling_convention;
+
+  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+
+  Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimLong);
+  locations->AddTemp(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
+  Mips64Assembler* assembler = GetAssembler();
+  LocationSummary* locations = invoke->GetLocations();
+
+  // Check assumption that sizeof(Char) is 2 (used in scaling below).
+  const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar);
+  DCHECK_EQ(char_size, 2u);
+  const size_t char_shift = Primitive::ComponentSizeShift(Primitive::kPrimChar);
+
+  GpuRegister srcObj = locations->InAt(0).AsRegister<GpuRegister>();
+  GpuRegister srcBegin = locations->InAt(1).AsRegister<GpuRegister>();
+  GpuRegister srcEnd = locations->InAt(2).AsRegister<GpuRegister>();
+  GpuRegister dstObj = locations->InAt(3).AsRegister<GpuRegister>();
+  GpuRegister dstBegin = locations->InAt(4).AsRegister<GpuRegister>();
+
+  GpuRegister dstPtr = locations->GetTemp(0).AsRegister<GpuRegister>();
+  DCHECK_EQ(dstPtr, A0);
+  GpuRegister srcPtr = locations->GetTemp(1).AsRegister<GpuRegister>();
+  DCHECK_EQ(srcPtr, A1);
+  GpuRegister numChrs = locations->GetTemp(2).AsRegister<GpuRegister>();
+  DCHECK_EQ(numChrs, A2);
+
+  GpuRegister dstReturn = locations->GetTemp(3).AsRegister<GpuRegister>();
+  DCHECK_EQ(dstReturn, V0);
+
+  Mips64Label done;
+
+  // Location of data in char array buffer.
+  const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value();
+
+  // Get offset of value field within a string object.
+  const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
+
+  __ Beqc(srcEnd, srcBegin, &done);  // No characters to move.
+
+  // Calculate number of characters to be copied.
+  __ Dsubu(numChrs, srcEnd, srcBegin);
+
+  // Calculate destination address.
+  __ Daddiu(dstPtr, dstObj, data_offset);
+  __ Dlsa(dstPtr, dstBegin, dstPtr, char_shift);
+
+  // Calculate source address.
+  __ Daddiu(srcPtr, srcObj, value_offset);
+  __ Dlsa(srcPtr, srcBegin, srcPtr, char_shift);
+
+  // Calculate number of bytes to copy from number of characters.
+  __ Dsll(numChrs, numChrs, char_shift);
+
+  codegen_->InvokeRuntime(kQuickMemcpy, invoke, invoke->GetDexPc(), nullptr);
+
+  __ Bind(&done);
+}
+
 static void GenHighestOneBit(LocationSummary* locations,
                              Primitive::Type type,
                              Mips64Assembler* assembler) {
@@ -1925,7 +2003,6 @@
 }
 
 UNIMPLEMENTED_INTRINSIC(MIPS64, ReferenceGetReferent)
-UNIMPLEMENTED_INTRINSIC(MIPS64, StringGetCharsNoCheck)
 UNIMPLEMENTED_INTRINSIC(MIPS64, SystemArrayCopyChar)
 UNIMPLEMENTED_INTRINSIC(MIPS64, SystemArrayCopy)
 
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index b9e284f..a599c2a 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2406,8 +2406,6 @@
       return os << "recursive";
     case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
       return os << "direct";
-    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
-      return os << "direct_fixup";
     case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
       return os << "dex_cache_pc_relative";
     case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod:
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 1f0c8e8..afa17ce 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -3982,12 +3982,6 @@
     // Used for app->boot calls with non-relocatable image and for JIT-compiled calls.
     kDirectAddress,
 
-    // Use ArtMethod* at an address that will be known at link time, embed the direct
-    // address in the code. If the image is relocatable, emit .patch_oat entry.
-    // Used for app->boot calls with relocatable image and boot->boot calls, whether
-    // the image relocatable or not.
-    kDirectAddressWithFixup,
-
     // Load from resolved methods array in the dex cache using a PC-relative load.
     // Used when we need to use the dex cache, for example for invoke-static that
     // may cause class initialization (the entry may point to a resolution method),
@@ -4006,20 +4000,6 @@
     // Recursive call, use local PC-relative call instruction.
     kCallSelf,
 
-    // Use PC-relative call instruction patched at link time.
-    // Used for calls within an oat file, boot->boot or app->app.
-    kCallPCRelative,
-
-    // Call to a known target address, embed the direct address in code.
-    // Used for app->boot call with non-relocatable image and for JIT-compiled calls.
-    kCallDirect,
-
-    // Call to a target address that will be known at link time, embed the direct
-    // address in code. If the image is relocatable, emit .patch_oat entry.
-    // Used for app->boot calls with relocatable image and boot->boot calls, whether
-    // the image relocatable or not.
-    kCallDirectWithFixup,
-
     // Use code pointer from the ArtMethod*.
     // Used when we don't know the target code. This is also the last-resort-kind used when
     // other kinds are unimplemented or impractical (i.e. slow) on a particular architecture.
@@ -4035,7 +4015,6 @@
     //   - the method address for kDirectAddress
     //   - the dex cache arrays offset for kDexCachePcRel.
     uint64_t method_load_data;
-    uint64_t direct_code_ptr;
   };
 
   HInvokeStaticOrDirect(ArenaAllocator* arena,
@@ -4145,7 +4124,6 @@
       return false;
     }
   }
-  bool HasDirectCodePtr() const { return GetCodePtrLocation() == CodePtrLocation::kCallDirect; }
 
   QuickEntrypointEnum GetStringInitEntryPoint() const {
     DCHECK(IsStringInit());
@@ -4162,11 +4140,6 @@
     return dispatch_info_.method_load_data;
   }
 
-  uint64_t GetDirectCodePtr() const {
-    DCHECK(HasDirectCodePtr());
-    return dispatch_info_.direct_code_ptr;
-  }
-
   ClinitCheckRequirement GetClinitCheckRequirement() const {
     return GetPackedField<ClinitCheckRequirementField>();
   }
diff --git a/compiler/optimizing/pc_relative_fixups_mips.cc b/compiler/optimizing/pc_relative_fixups_mips.cc
index 82feb95..e321b9e 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.cc
+++ b/compiler/optimizing/pc_relative_fixups_mips.cc
@@ -45,10 +45,6 @@
   }
 
  private:
-  void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
-    HandleInvoke(invoke);
-  }
-
   void InitializePCRelativeBasePointer() {
     // Ensure we only initialize the pointer once.
     if (base_ != nullptr) {
@@ -112,38 +108,6 @@
     block->ReplaceAndRemoveInstructionWith(switch_insn, mips_switch);
   }
 
-  void HandleInvoke(HInvoke* invoke) {
-    // If this is an invoke-static/-direct with PC-relative dex cache array
-    // addressing, we need the PC-relative address base.
-    HInvokeStaticOrDirect* invoke_static_or_direct = invoke->AsInvokeStaticOrDirect();
-    if (invoke_static_or_direct != nullptr) {
-      HInvokeStaticOrDirect::MethodLoadKind method_load_kind =
-          invoke_static_or_direct->GetMethodLoadKind();
-      HInvokeStaticOrDirect::CodePtrLocation code_ptr_location =
-          invoke_static_or_direct->GetCodePtrLocation();
-
-      bool has_extra_input =
-          (method_load_kind == HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup) ||
-          (code_ptr_location == HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup);
-
-      // We can't add a pointer to the constant area if we already have a current
-      // method pointer. This may arise when sharpening doesn't remove the current
-      // method pointer from the invoke.
-      if (invoke_static_or_direct->HasCurrentMethodInput()) {
-        DCHECK(!invoke_static_or_direct->HasPcRelativeDexCache());
-        CHECK(!has_extra_input);
-        return;
-      }
-
-      if (has_extra_input &&
-          !IsCallFreeIntrinsic<IntrinsicLocationsBuilderMIPS>(invoke, codegen_)) {
-        InitializePCRelativeBasePointer();
-        // Add the extra parameter base_.
-        invoke_static_or_direct->AddSpecialInput(base_);
-      }
-    }
-  }
-
   CodeGeneratorMIPS* codegen_;
 
   // The generated HMipsComputeBaseMethodAddress in the entry block needed as an
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index c191c66..33b3875 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -270,7 +270,7 @@
         ScopedObjectAccess soa(Thread::Current());
         HInstruction* insert_point = notNullBlock->GetFirstInstruction();
         ReferenceTypeInfo object_rti = ReferenceTypeInfo::Create(
-            handle_cache_.GetObjectClassHandle(), /* is_exact */ true);
+            handle_cache_.GetObjectClassHandle(), /* is_exact */ false);
         if (ShouldCreateBoundType(insert_point, obj, object_rti, nullptr, notNullBlock)) {
           bound_type = new (graph_->GetArena()) HBoundType(obj);
           bound_type->SetUpperBound(object_rti, /* bound_can_be_null */ false);
@@ -411,7 +411,9 @@
         HInstruction* insert_point = instanceOfTrueBlock->GetFirstInstruction();
         if (ShouldCreateBoundType(insert_point, obj, class_rti, nullptr, instanceOfTrueBlock)) {
           bound_type = new (graph_->GetArena()) HBoundType(obj);
-          bound_type->SetUpperBound(class_rti, /* InstanceOf fails for null. */ false);
+          bool is_exact = class_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes();
+          bound_type->SetUpperBound(ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), is_exact),
+                                    /* InstanceOf fails for null. */ false);
           instanceOfTrueBlock->InsertInstructionBefore(bound_type, insert_point);
         } else {
           // We already have a bound type on the position we would need to insert
@@ -605,15 +607,17 @@
     // Narrow the type as much as possible.
     HInstruction* obj = instr->InputAt(0);
     ReferenceTypeInfo obj_rti = obj->GetReferenceTypeInfo();
-    if (class_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes()) {
-      instr->SetReferenceTypeInfo(
-          ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ true));
+    if (class_rti.IsExact()) {
+      instr->SetReferenceTypeInfo(class_rti);
     } else if (obj_rti.IsValid()) {
       if (class_rti.IsSupertypeOf(obj_rti)) {
         // Object type is more specific.
         instr->SetReferenceTypeInfo(obj_rti);
       } else {
-        // Upper bound is more specific.
+        // Upper bound is more specific, or unrelated to the object's type.
+        // Note that the object might then be exact, and we know the code dominated by this
+        // bound type is dead. To not confuse potential other optimizations, we mark
+        // the bound as non-exact.
         instr->SetReferenceTypeInfo(
             ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false));
       }
@@ -644,8 +648,11 @@
 
   if (class_rti.IsValid()) {
     DCHECK(is_first_run_);
+    ScopedObjectAccess soa(Thread::Current());
     // This is the first run of RTP and class is resolved.
-    bound_type->SetUpperBound(class_rti, /* CheckCast succeeds for nulls. */ true);
+    bool is_exact = class_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes();
+    bound_type->SetUpperBound(ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), is_exact),
+                              /* CheckCast succeeds for nulls. */ true);
   } else {
     // This is the first run of RTP and class is unresolved. Remove the binding.
     // The instruction itself is removed in VisitBoundType so as to not
@@ -795,21 +802,25 @@
 }
 
 void ReferenceTypePropagation::UpdateBoundType(HBoundType* instr) {
-  ReferenceTypeInfo new_rti = instr->InputAt(0)->GetReferenceTypeInfo();
-  if (!new_rti.IsValid()) {
+  ReferenceTypeInfo input_rti = instr->InputAt(0)->GetReferenceTypeInfo();
+  if (!input_rti.IsValid()) {
     return;  // No new info yet.
   }
 
-  // Make sure that we don't go over the bounded type.
   ReferenceTypeInfo upper_bound_rti = instr->GetUpperBound();
-  if (!upper_bound_rti.IsSupertypeOf(new_rti)) {
-    // Note that the input might be exact, in which case we know the branch leading
-    // to the bound type is dead. We play it safe by not marking the bound type as
-    // exact.
-    bool is_exact = upper_bound_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes();
-    new_rti = ReferenceTypeInfo::Create(upper_bound_rti.GetTypeHandle(), is_exact);
+  if (upper_bound_rti.IsExact()) {
+    instr->SetReferenceTypeInfo(upper_bound_rti);
+  } else if (upper_bound_rti.IsSupertypeOf(input_rti)) {
+    // input is more specific.
+    instr->SetReferenceTypeInfo(input_rti);
+  } else {
+    // upper_bound is more specific or unrelated.
+    // Note that the object might then be exact, and we know the code dominated by this
+    // bound type is dead. To not confuse potential other optimizations, we mark
+    // the bound as non-exact.
+    instr->SetReferenceTypeInfo(
+        ReferenceTypeInfo::Create(upper_bound_rti.GetTypeHandle(), /* is_exact */ false));
   }
-  instr->SetReferenceTypeInfo(new_rti);
 }
 
 // NullConstant inputs are ignored during merging as they do not provide any useful information.
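The rewritten UpdateBoundType thus picks one of three outcomes: an exact upper bound is taken as-is, a more specific input type is kept when the bound is its supertype, and an unrelated bound is applied but deliberately marked non-exact. A compact sketch of that decision; RTI and the toy IsSupertypeOf ordering are illustrative, not the ART API:

// Sketch only; smaller class_id stands in for "more general" in this toy hierarchy.
struct RTI {
  bool exact;
  int class_id;
  bool IsSupertypeOf(const RTI& other) const { return class_id <= other.class_id; }
};

static RTI AsNonExact(RTI rti) {
  rti.exact = false;
  return rti;
}

// Mirrors the three branches added to UpdateBoundType().
static RTI ChooseBoundType(const RTI& input, const RTI& upper_bound) {
  if (upper_bound.exact) {
    return upper_bound;            // exact bound: nothing can be more precise
  }
  if (upper_bound.IsSupertypeOf(input)) {
    return input;                  // the input is more specific, keep it
  }
  return AsNonExact(upper_bound);  // unrelated types: conservative, non-exact bound
}

int main() {
  RTI object{/* exact */ false, /* class_id */ 0};
  RTI string{/* exact */ true, /* class_id */ 5};
  return ChooseBoundType(string, object).exact ? 0 : 1;  // input is more specific: kept
}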
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 91826cf..9fdeccf 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -54,6 +54,24 @@
   }
 }
 
+static bool IsInBootImage(ArtMethod* method) {
+  const std::vector<gc::space::ImageSpace*>& image_spaces =
+      Runtime::Current()->GetHeap()->GetBootImageSpaces();
+  for (gc::space::ImageSpace* image_space : image_spaces) {
+    const auto& method_section = image_space->GetImageHeader().GetMethodsSection();
+    if (method_section.Contains(reinterpret_cast<uint8_t*>(method) - image_space->Begin())) {
+      return true;
+    }
+  }
+  return false;
+}
+
+static bool AOTCanEmbedMethod(ArtMethod* method, const CompilerOptions& options) {
+  // Including patch information means the AOT code will be patched, which we don't
+  // support in the compiler, and is anyways moving away b/33192586.
+  return IsInBootImage(method) && !options.GetCompilePic() && !options.GetIncludePatchInformation();
+}
+
 void HSharpening::ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
   if (invoke->IsStringInit()) {
     // Not using the dex cache arrays. But we could still try to use a better dispatch...
@@ -61,68 +79,42 @@
     return;
   }
 
-  HGraph* outer_graph = codegen_->GetGraph();
-  ArtMethod* compiling_method = graph_->GetArtMethod();
+  ArtMethod* callee = invoke->GetResolvedMethod();
+  DCHECK(callee != nullptr);
 
   HInvokeStaticOrDirect::MethodLoadKind method_load_kind;
   HInvokeStaticOrDirect::CodePtrLocation code_ptr_location;
   uint64_t method_load_data = 0u;
-  uint64_t direct_code_ptr = 0u;
 
-  if (invoke->GetResolvedMethod() == outer_graph->GetArtMethod()) {
-    DCHECK(outer_graph->GetArtMethod() != nullptr);
+  // Note: we never call an ArtMethod through a known code pointer, as
+  // we do not want to keep on invoking it if it gets deoptimized. This
+  // applies to both AOT and JIT.
+  // This also avoids having to find out if the code pointer of an ArtMethod
+  // is the resolution trampoline (for ensuring the class is initialized), or
+  // the interpreter entrypoint. Such code pointers we do not want to call
+  // directly.
+  // Only in the case of a recursive call can we call directly, as we know the
+  // class is initialized already or being initialized, and the call will not
+  // be invoked once the method is deoptimized.
+
+  if (callee == codegen_->GetGraph()->GetArtMethod()) {
+    // Recursive call.
     method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kRecursive;
     code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallSelf;
+  } else if (Runtime::Current()->UseJitCompilation() ||
+      AOTCanEmbedMethod(callee, codegen_->GetCompilerOptions())) {
+    // JIT or on-device AOT compilation referencing a boot image method.
+    // Use the method address directly.
+    method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress;
+    method_load_data = reinterpret_cast<uintptr_t>(callee);
+    code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
   } else {
-    uintptr_t direct_code, direct_method;
-    {
-      ScopedObjectAccess soa(Thread::Current());
-      compiler_driver_->GetCodeAndMethodForDirectCall(
-          (compiling_method == nullptr) ? nullptr : compiling_method->GetDeclaringClass(),
-          invoke->GetResolvedMethod(),
-          &direct_code,
-          &direct_method);
-    }
-    if (direct_method != 0u) {  // Should we use a direct pointer to the method?
-      // Note: For JIT, kDirectAddressWithFixup doesn't make sense at all and while
-      // kDirectAddress would be fine for image methods, we don't support it at the moment.
-      DCHECK(!Runtime::Current()->UseJitCompilation());
-      if (direct_method != static_cast<uintptr_t>(-1)) {  // Is the method pointer known now?
-        method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress;
-        method_load_data = direct_method;
-      } else {  // The direct pointer will be known at link time.
-        method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup;
-      }
-    } else {  // Use dex cache.
-      if (!Runtime::Current()->UseJitCompilation()) {
-        // Use PC-relative access to the dex cache arrays.
-        method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative;
-        DexCacheArraysLayout layout(GetInstructionSetPointerSize(codegen_->GetInstructionSet()),
-                                    &graph_->GetDexFile());
-        method_load_data = layout.MethodOffset(invoke->GetDexMethodIndex());
-      } else {  // We must go through the ArtMethod's pointer to resolved methods.
-        method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod;
-      }
-    }
-    if (direct_code != 0u) {  // Should we use a direct pointer to the code?
-      // Note: For JIT, kCallPCRelative and kCallDirectWithFixup don't make sense at all and
-      // while kCallDirect would be fine for image methods, we don't support it at the moment.
-      DCHECK(!Runtime::Current()->UseJitCompilation());
-      const DexFile* dex_file_of_callee = invoke->GetTargetMethod().dex_file;
-      if (direct_code != static_cast<uintptr_t>(-1)) {  // Is the code pointer known now?
-        code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallDirect;
-        direct_code_ptr = direct_code;
-      } else if (ContainsElement(compiler_driver_->GetDexFilesForOatFile(), dex_file_of_callee)) {
-        // Use PC-relative calls for invokes within a multi-dex oat file.
-        code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative;
-      } else {  // The direct pointer will be known at link time.
-        // NOTE: This is used for app->boot calls when compiling an app against
-        // a relocatable but not yet relocated image.
-        code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup;
-      }
-    } else {  // We must use the code pointer from the ArtMethod.
-      code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
-    }
+    // Use PC-relative access to the dex cache arrays.
+    method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative;
+    DexCacheArraysLayout layout(GetInstructionSetPointerSize(codegen_->GetInstructionSet()),
+                                &graph_->GetDexFile());
+    method_load_data = layout.MethodOffset(invoke->GetDexMethodIndex());
+    code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
   }
 
   if (graph_->IsDebuggable()) {
@@ -132,7 +124,7 @@
   }
 
   HInvokeStaticOrDirect::DispatchInfo desired_dispatch_info = {
-      method_load_kind, code_ptr_location, method_load_data, direct_code_ptr
+      method_load_kind, code_ptr_location, method_load_data
   };
   HInvokeStaticOrDirect::DispatchInfo dispatch_info =
       codegen_->GetSupportedInvokeStaticOrDirectDispatch(desired_dispatch_info, invoke);
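After this change, sharpening picks between exactly three dispatch shapes: a recursive call uses kRecursive/kCallSelf, a callee whose ArtMethod* can be embedded (JIT, or non-PIC AOT with the method in the boot image) uses kDirectAddress, and everything else loads the method PC-relative from the dex cache arrays; the code pointer always comes from the ArtMethod. A sketch of that selection with simplified enums, where the two boolean predicates stand in for UseJitCompilation() and AOTCanEmbedMethod():

// Simplified mirror of the selection logic above; not the ART types.
enum class MethodLoadKind { kRecursive, kDirectAddress, kDexCachePcRelative };
enum class CodePtrLocation { kCallSelf, kCallArtMethod };

struct Dispatch {
  MethodLoadKind load;
  CodePtrLocation code;
};

static Dispatch ChooseDispatch(bool is_recursive, bool jit_or_boot_image_aot) {
  if (is_recursive) {
    // Only a recursive call may use a direct code pointer: the class is known to be
    // (being) initialized and the call goes away with the method on deoptimization.
    return {MethodLoadKind::kRecursive, CodePtrLocation::kCallSelf};
  }
  if (jit_or_boot_image_aot) {
    // The ArtMethod* can be embedded directly; the code is still reached through
    // the ArtMethod so deoptimized callees are handled correctly.
    return {MethodLoadKind::kDirectAddress, CodePtrLocation::kCallArtMethod};
  }
  // Default: PC-relative load from the dex cache arrays.
  return {MethodLoadKind::kDexCachePcRelative, CodePtrLocation::kCallArtMethod};
}

int main() {
  Dispatch d = ChooseDispatch(/* is_recursive */ false, /* jit_or_boot_image_aot */ true);
  return d.load == MethodLoadKind::kDirectAddress ? 0 : 1;
}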
diff --git a/compiler/utils/arm/assembler_arm_vixl.h b/compiler/utils/arm/assembler_arm_vixl.h
index 3cf6a2e..322f6c4 100644
--- a/compiler/utils/arm/assembler_arm_vixl.h
+++ b/compiler/utils/arm/assembler_arm_vixl.h
@@ -39,6 +39,13 @@
 
 class ArmVIXLMacroAssembler FINAL : public vixl32::MacroAssembler {
  public:
+  // Most methods fit in a 1KB code buffer, which results in more optimal alloc/realloc and
+  // fewer system calls than a larger default capacity.
+  static constexpr size_t kDefaultCodeBufferCapacity = 1 * KB;
+
+  ArmVIXLMacroAssembler()
+      : vixl32::MacroAssembler(ArmVIXLMacroAssembler::kDefaultCodeBufferCapacity) {}
+
   // The following interfaces can generate CMP+Bcc or Cbz/Cbnz.
   // CMP+Bcc are generated by default.
   // If a hint is given (is_far_target = false) and rn and label can all fit into Cbz/Cbnz,
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index b34e125..5c48759 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -145,7 +145,8 @@
                                               const std::vector<Reg2*> reg2_registers,
                                               std::string (AssemblerTest::*GetName1)(const Reg1&),
                                               std::string (AssemblerTest::*GetName2)(const Reg2&),
-                                              const std::string& fmt) {
+                                              const std::string& fmt,
+                                              int bias = 0) {
     std::string str;
     std::vector<int64_t> imms = CreateImmediateValuesBits(abs(imm_bits), (imm_bits > 0));
 
@@ -153,7 +154,7 @@
       for (auto reg2 : reg2_registers) {
         for (int64_t imm : imms) {
           ImmType new_imm = CreateImmediate(imm);
-          (assembler_.get()->*f)(*reg1, *reg2, new_imm);
+          (assembler_.get()->*f)(*reg1, *reg2, new_imm + bias);
           std::string base = fmt;
 
           std::string reg1_string = (this->*GetName1)(*reg1);
@@ -171,7 +172,7 @@
           size_t imm_index = base.find(IMM_TOKEN);
           if (imm_index != std::string::npos) {
             std::ostringstream sreg;
-            sreg << imm;
+            sreg << imm + bias;
             std::string imm_string = sreg.str();
             base.replace(imm_index, ConstexprStrLen(IMM_TOKEN), imm_string);
           }
@@ -188,6 +189,67 @@
     return str;
   }
 
+  template <typename Reg1, typename Reg2, typename Reg3, typename ImmType>
+  std::string RepeatTemplatedRegistersImmBits(void (Ass::*f)(Reg1, Reg2, Reg3, ImmType),
+                                              int imm_bits,
+                                              const std::vector<Reg1*> reg1_registers,
+                                              const std::vector<Reg2*> reg2_registers,
+                                              const std::vector<Reg3*> reg3_registers,
+                                              std::string (AssemblerTest::*GetName1)(const Reg1&),
+                                              std::string (AssemblerTest::*GetName2)(const Reg2&),
+                                              std::string (AssemblerTest::*GetName3)(const Reg3&),
+                                              std::string fmt,
+                                              int bias) {
+    std::string str;
+    std::vector<int64_t> imms = CreateImmediateValuesBits(abs(imm_bits), (imm_bits > 0));
+
+    for (auto reg1 : reg1_registers) {
+      for (auto reg2 : reg2_registers) {
+        for (auto reg3 : reg3_registers) {
+          for (int64_t imm : imms) {
+            ImmType new_imm = CreateImmediate(imm);
+            (assembler_.get()->*f)(*reg1, *reg2, *reg3, new_imm + bias);
+            std::string base = fmt;
+
+            std::string reg1_string = (this->*GetName1)(*reg1);
+            size_t reg1_index;
+            while ((reg1_index = base.find(REG1_TOKEN)) != std::string::npos) {
+              base.replace(reg1_index, ConstexprStrLen(REG1_TOKEN), reg1_string);
+            }
+
+            std::string reg2_string = (this->*GetName2)(*reg2);
+            size_t reg2_index;
+            while ((reg2_index = base.find(REG2_TOKEN)) != std::string::npos) {
+              base.replace(reg2_index, ConstexprStrLen(REG2_TOKEN), reg2_string);
+            }
+
+            std::string reg3_string = (this->*GetName3)(*reg3);
+            size_t reg3_index;
+            while ((reg3_index = base.find(REG3_TOKEN)) != std::string::npos) {
+              base.replace(reg3_index, ConstexprStrLen(REG3_TOKEN), reg3_string);
+            }
+
+            size_t imm_index = base.find(IMM_TOKEN);
+            if (imm_index != std::string::npos) {
+              std::ostringstream sreg;
+              sreg << imm + bias;
+              std::string imm_string = sreg.str();
+              base.replace(imm_index, ConstexprStrLen(IMM_TOKEN), imm_string);
+            }
+
+            if (str.size() > 0) {
+              str += "\n";
+            }
+            str += base;
+          }
+        }
+      }
+    }
+    // Add a newline at the end.
+    str += "\n";
+    return str;
+  }
+
   template <typename ImmType, typename Reg1, typename Reg2>
   std::string RepeatTemplatedImmBitsRegisters(void (Ass::*f)(ImmType, Reg1, Reg2),
                                               const std::vector<Reg1*> reg1_registers,
@@ -245,14 +307,15 @@
                                              int imm_bits,
                                              const std::vector<Reg*> registers,
                                              std::string (AssemblerTest::*GetName)(const RegType&),
-                                             const std::string& fmt) {
+                                             const std::string& fmt,
+                                             int bias) {
     std::string str;
     std::vector<int64_t> imms = CreateImmediateValuesBits(abs(imm_bits), (imm_bits > 0));
 
     for (auto reg : registers) {
       for (int64_t imm : imms) {
         ImmType new_imm = CreateImmediate(imm);
-        (assembler_.get()->*f)(*reg, new_imm);
+        (assembler_.get()->*f)(*reg, new_imm + bias);
         std::string base = fmt;
 
         std::string reg_string = (this->*GetName)(*reg);
@@ -264,7 +327,7 @@
         size_t imm_index = base.find(IMM_TOKEN);
         if (imm_index != std::string::npos) {
           std::ostringstream sreg;
-          sreg << imm;
+          sreg << imm + bias;
           std::string imm_string = sreg.str();
           base.replace(imm_index, ConstexprStrLen(IMM_TOKEN), imm_string);
         }
@@ -281,36 +344,60 @@
   }
 
   template <typename ImmType>
-  std::string RepeatRRIb(void (Ass::*f)(Reg, Reg, ImmType), int imm_bits, const std::string& fmt) {
+  std::string RepeatRRIb(void (Ass::*f)(Reg, Reg, ImmType),
+                         int imm_bits,
+                         const std::string& fmt,
+                         int bias = 0) {
     return RepeatTemplatedRegistersImmBits<Reg, Reg, ImmType>(f,
         imm_bits,
         GetRegisters(),
         GetRegisters(),
         &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
         &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
-        fmt);
+        fmt,
+        bias);
   }
 
   template <typename ImmType>
-  std::string RepeatRIb(void (Ass::*f)(Reg, ImmType), int imm_bits, const std::string& fmt) {
+  std::string RepeatRRRIb(void (Ass::*f)(Reg, Reg, Reg, ImmType),
+                          int imm_bits,
+                          const std::string& fmt,
+                          int bias = 0) {
+    return RepeatTemplatedRegistersImmBits<Reg, Reg, Reg, ImmType>(f,
+        imm_bits,
+        GetRegisters(),
+        GetRegisters(),
+        GetRegisters(),
+        &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+        &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+        &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+        fmt,
+        bias);
+  }
+
+  template <typename ImmType>
+  std::string RepeatRIb(void (Ass::*f)(Reg, ImmType), int imm_bits, std::string fmt, int bias = 0) {
     return RepeatTemplatedRegisterImmBits<Reg, ImmType>(f,
         imm_bits,
         GetRegisters(),
         &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
-        fmt);
+        fmt,
+        bias);
   }
 
   template <typename ImmType>
   std::string RepeatFRIb(void (Ass::*f)(FPReg, Reg, ImmType),
                          int imm_bits,
-                         const std::string& fmt) {
+                         const std::string& fmt,
+                         int bias = 0) {
     return RepeatTemplatedRegistersImmBits<FPReg, Reg, ImmType>(f,
         imm_bits,
         GetFPRegisters(),
         GetRegisters(),
         &AssemblerTest::GetFPRegName,
         &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
-        fmt);
+        fmt,
+        bias);
   }
 
   std::string RepeatFF(void (Ass::*f)(FPReg, FPReg), const std::string& fmt) {
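The added bias parameter exists for encodings whose assembly-level operand is the encoded field plus a constant: the harness still draws raw values from the imm_bits-wide range, but both the assembler call and the expected disassembly text use imm + bias. A toy illustration of why the lsa/dlsa tests below pass imm_bits = 2 and bias = 1 (standalone values only; this is not the test harness itself):

#include <cstdio>

int main() {
  const int imm_bits = 2;  // the sa field is 2 bits wide: raw values 0..3
  const int bias = 1;      // lsa's assembly operand is sa + 1, i.e. 1..4
  for (int raw = 0; raw < (1 << imm_bits); ++raw) {
    // Both the assembler call and the expected "lsa ..., {imm}" string use raw + bias,
    // so the generated test inputs stay in the architecturally valid 1..4 range.
    std::printf("encoded sa field: %d, assembly operand: %d\n", raw, raw + bias);
  }
  return 0;
}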
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index b16d99a..ab4f9e9 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -5537,7 +5537,7 @@
   "  f6:	f20d 4c01 	addwne	ip, sp, #1025	; 0x401\n",
   "  fa:	f8d9 c084 	ldr.w	ip, [r9, #132]	; 0x84\n",
   "  fe:	f1bc 0f00 	cmp.w	ip, #0\n",
-  " 102:	d16f      	bne.n	1e4 <VixlJniHelpers+0x1e4>\n",
+  " 102:	d171      	bne.n	1e8 <VixlJniHelpers+0x1e8>\n",
   " 104:	f8cd c7ff 	str.w	ip, [sp, #2047]	; 0x7ff\n",
   " 108:	f8cd c7ff 	str.w	ip, [sp, #2047]	; 0x7ff\n",
   " 10c:	f8cd c7ff 	str.w	ip, [sp, #2047]	; 0x7ff\n",
@@ -5593,9 +5593,9 @@
   " 1d4:	f8cd c7ff 	str.w	ip, [sp, #2047]	; 0x7ff\n",
   " 1d8:	f8cd c7ff 	str.w	ip, [sp, #2047]	; 0x7ff\n",
   " 1dc:	f8cd c7ff 	str.w	ip, [sp, #2047]	; 0x7ff\n",
-  " 1e0:	f000 b802 	b.w	1e8 <VixlJniHelpers+0x1e8>\n",
-  " 1e4:	f000 b81a 	b.w	21c <VixlJniHelpers+0x21c>\n",
-  " 1e8:	f8cd c7ff 	str.w	ip, [sp, #2047]	; 0x7ff\n",
+  " 1e0:	f8cd c7ff 	str.w	ip, [sp, #2047]	; 0x7ff\n",
+  " 1e4:	f000 b802 	b.w	1ec <VixlJniHelpers+0x1ec>\n",
+  " 1e8:	f000 b818 	b.w	21c <VixlJniHelpers+0x21c>\n",
   " 1ec:	f8cd c7ff 	str.w	ip, [sp, #2047]	; 0x7ff\n",
   " 1f0:	f8cd c7ff 	str.w	ip, [sp, #2047]	; 0x7ff\n",
   " 1f4:	f8cd c7ff 	str.w	ip, [sp, #2047]	; 0x7ff\n",
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index 3dcad6a..5e83e82 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -635,6 +635,13 @@
   DsFsmInstrRrr(EmitR(0x1f, rt, rd, static_cast<Register>(pos + size - 1), pos, 0x04), rd, rd, rt);
 }
 
+void MipsAssembler::Lsa(Register rd, Register rs, Register rt, int saPlusOne) {
+  CHECK(IsR6());
+  CHECK(1 <= saPlusOne && saPlusOne <= 4) << saPlusOne;
+  int sa = saPlusOne - 1;
+  DsFsmInstrRrr(EmitR(0x0, rs, rt, rd, sa, 0x05), rd, rs, rt);
+}
+
 void MipsAssembler::Lb(Register rt, Register rs, uint16_t imm16) {
   DsFsmInstrRrr(EmitI(0x20, rs, rt, imm16), rt, rs, rs);
 }
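Functionally, lsa rd, rs, rt, n computes rd = (rs << n) + rt for n in 1..4, which is why the helper takes saPlusOne and writes sa = saPlusOne - 1 into the shift field. A behavioural sketch (not the encoder) of the instruction and of the sll+addu fallback that the getChars intrinsic uses on pre-R6 cores:

#include <cassert>
#include <cstdint>

// What lsa computes on R6: saPlusOne is both the assembly operand and the actual
// shift amount; the encoded sa field is saPlusOne - 1.
static uint32_t Lsa(uint32_t rs, uint32_t rt, int sa_plus_one) {
  assert(1 <= sa_plus_one && sa_plus_one <= 4);
  return (rs << sa_plus_one) + rt;
}

// Pre-R6 equivalent, as emitted in VisitStringGetCharsNoCheck: Sll into AT, then Addu.
static uint32_t SllAddu(uint32_t rs, uint32_t rt, int shift) {
  uint32_t at = rs << shift;  // Sll(AT, rs, shift)
  return rt + at;             // Addu(rd, rt, AT)
}

int main() {
  assert(Lsa(3, 100, 1) == 106);      // (3 << 1) + 100
  assert(SllAddu(3, 100, 1) == 106);  // same result via the two-instruction sequence
  return 0;
}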
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 800dc5f..2fca185 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -262,6 +262,7 @@
   void Srav(Register rd, Register rt, Register rs);
   void Ext(Register rd, Register rt, int pos, int size);  // R2+
   void Ins(Register rd, Register rt, int pos, int size);  // R2+
+  void Lsa(Register rd, Register rs, Register rt, int saPlusOne);  // R6
 
   void Lb(Register rt, Register rs, uint16_t imm16);
   void Lh(Register rt, Register rs, uint16_t imm16);
diff --git a/compiler/utils/mips/assembler_mips32r6_test.cc b/compiler/utils/mips/assembler_mips32r6_test.cc
index a52f519..30667ef 100644
--- a/compiler/utils/mips/assembler_mips32r6_test.cc
+++ b/compiler/utils/mips/assembler_mips32r6_test.cc
@@ -319,6 +319,14 @@
   DriverStr(RepeatRR(&mips::MipsAssembler::Bitswap, "bitswap ${reg1}, ${reg2}"), "bitswap");
 }
 
+TEST_F(AssemblerMIPS32r6Test, Lsa) {
+  DriverStr(RepeatRRRIb(&mips::MipsAssembler::Lsa,
+                        2,
+                        "lsa ${reg1}, ${reg2}, ${reg3}, {imm}",
+                        1),
+            "lsa");
+}
+
 TEST_F(AssemblerMIPS32r6Test, Seleqz) {
   DriverStr(RepeatRRR(&mips::MipsAssembler::Seleqz, "seleqz ${reg1}, ${reg2}, ${reg3}"),
             "seleqz");
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 2419b6b..998f2c7 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -321,6 +321,18 @@
   EmitR(0x1f, rs, rt, static_cast<GpuRegister>(pos + size - 33), pos - 32, 0x6);
 }
 
+void Mips64Assembler::Lsa(GpuRegister rd, GpuRegister rs, GpuRegister rt, int saPlusOne) {
+  CHECK(1 <= saPlusOne && saPlusOne <= 4) << saPlusOne;
+  int sa = saPlusOne - 1;
+  EmitR(0x0, rs, rt, rd, sa, 0x05);
+}
+
+void Mips64Assembler::Dlsa(GpuRegister rd, GpuRegister rs, GpuRegister rt, int saPlusOne) {
+  CHECK(1 <= saPlusOne && saPlusOne <= 4) << saPlusOne;
+  int sa = saPlusOne - 1;
+  EmitR(0x0, rs, rt, rd, sa, 0x15);
+}
+
 void Mips64Assembler::Wsbh(GpuRegister rd, GpuRegister rt) {
   EmitRtd(0x1f, rt, rd, 2, 0x20);
 }
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index c6d3119..a0a1db6 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -471,6 +471,8 @@
   void Dshd(GpuRegister rd, GpuRegister rt);  // MIPS64
   void Dext(GpuRegister rs, GpuRegister rt, int pos, int size);  // MIPS64
   void Dinsu(GpuRegister rt, GpuRegister rs, int pos, int size);  // MIPS64
+  void Lsa(GpuRegister rd, GpuRegister rs, GpuRegister rt, int saPlusOne);
+  void Dlsa(GpuRegister rd, GpuRegister rs, GpuRegister rt, int saPlusOne);  // MIPS64
   void Wsbh(GpuRegister rd, GpuRegister rt);
   void Sc(GpuRegister rt, GpuRegister base, int16_t imm9 = 0);
   void Scd(GpuRegister rt, GpuRegister base, int16_t imm9 = 0);  // MIPS64
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index e6fc6fb..f2cbebb 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -1141,6 +1141,22 @@
   DriverStr(expected.str(), "Dinsu");
 }
 
+TEST_F(AssemblerMIPS64Test, Lsa) {
+  DriverStr(RepeatRRRIb(&mips64::Mips64Assembler::Lsa,
+                        2,
+                        "lsa ${reg1}, ${reg2}, ${reg3}, {imm}",
+                        1),
+            "lsa");
+}
+
+TEST_F(AssemblerMIPS64Test, Dlsa) {
+  DriverStr(RepeatRRRIb(&mips64::Mips64Assembler::Dlsa,
+                        2,
+                        "dlsa ${reg1}, ${reg2}, ${reg3}, {imm}",
+                        1),
+            "dlsa");
+}
+
 TEST_F(AssemblerMIPS64Test, Wsbh) {
   DriverStr(RepeatRR(&mips64::Mips64Assembler::Wsbh, "wsbh ${reg1}, ${reg2}"), "wsbh");
 }
diff --git a/compiler/verifier_deps_test.cc b/compiler/verifier_deps_test.cc
index 52765f9..85ae61f 100644
--- a/compiler/verifier_deps_test.cc
+++ b/compiler/verifier_deps_test.cc
@@ -508,7 +508,7 @@
                                          /* src */ "LMySSLSocket;",
                                          /* is_strict */ true,
                                          /* is_assignable */ true));
-  ASSERT_TRUE(HasAssignable("Ljava/net/Socket;", "LMySSLSocket;", true));
+  ASSERT_TRUE(HasAssignable("Ljava/net/Socket;", "Ljavax/net/ssl/SSLSocket;", true));
 }
 
 TEST_F(VerifierDepsTest, Assignable_DestinationInBoot2) {
@@ -516,7 +516,7 @@
                                          /* src */ "LMySimpleTimeZone;",
                                          /* is_strict */ true,
                                          /* is_assignable */ true));
-  ASSERT_TRUE(HasAssignable("Ljava/util/TimeZone;", "LMySimpleTimeZone;", true));
+  ASSERT_TRUE(HasAssignable("Ljava/util/TimeZone;", "Ljava/util/SimpleTimeZone;", true));
 }
 
 TEST_F(VerifierDepsTest, Assignable_DestinationInBoot3) {
@@ -539,21 +539,9 @@
   ASSERT_TRUE(HasAssignable("Ljava/util/TimeZone;", "Ljava/util/SimpleTimeZone;", true));
 }
 
-TEST_F(VerifierDepsTest, Assignable_BothArrays_Erroneous) {
-  ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "[[Ljava/util/TimeZone;",
-                                         /* src */ "[[LMyErroneousTimeZone;",
-                                         /* is_strict */ true,
-                                         /* is_assignable */ true));
-  // If the component type of an array is erroneous, we record the dependency on
-  // the array type.
-  ASSERT_FALSE(HasAssignable("[[Ljava/util/TimeZone;", "[[LMyErroneousTimeZone;", true));
-  ASSERT_TRUE(HasAssignable("[Ljava/util/TimeZone;", "[LMyErroneousTimeZone;", true));
-  ASSERT_FALSE(HasAssignable("Ljava/util/TimeZone;", "LMyErroneousTimeZone;", true));
-}
-
-  // We test that VerifierDeps does not try to optimize by storing assignability
-  // of the component types. This is due to the fact that the component type may
-  // be an erroneous class, even though the array type has resolved status.
+// We test that VerifierDeps does not try to optimize by storing assignability
+// of the component types. This is due to the fact that the component type may
+// be an erroneous class, even though the array type has resolved status.
 
 TEST_F(VerifierDepsTest, Assignable_ArrayToInterface1) {
   ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/io/Serializable;",
@@ -584,7 +572,7 @@
                                          /* src */ "LMySSLSocket;",
                                          /* is_strict */ true,
                                          /* is_assignable */ false));
-  ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "LMySSLSocket;", false));
+  ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "Ljavax/net/ssl/SSLSocket;", false));
 }
 
 TEST_F(VerifierDepsTest, NotAssignable_DestinationInBoot2) {
@@ -592,7 +580,7 @@
                                          /* src */ "LMySimpleTimeZone;",
                                          /* is_strict */ true,
                                          /* is_assignable */ false));
-  ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "LMySimpleTimeZone;", false));
+  ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "Ljava/util/SimpleTimeZone;", false));
 }
 
 TEST_F(VerifierDepsTest, NotAssignable_BothArrays) {
@@ -608,16 +596,6 @@
   ASSERT_TRUE(HasClass("Ljava/lang/Thread;", true, "public"));
 }
 
-TEST_F(VerifierDepsTest, ArgumentType_ResolvedReferenceArray) {
-  ASSERT_TRUE(VerifyMethod("ArgumentType_ResolvedReferenceArray"));
-  ASSERT_TRUE(HasClass("[Ljava/lang/Thread;", true, "public final abstract"));
-}
-
-TEST_F(VerifierDepsTest, ArgumentType_ResolvedPrimitiveArray) {
-  ASSERT_TRUE(VerifyMethod("ArgumentType_ResolvedPrimitiveArray"));
-  ASSERT_TRUE(HasClass("[B", true, "public final abstract"));
-}
-
 TEST_F(VerifierDepsTest, ArgumentType_UnresolvedClass) {
   ASSERT_TRUE(VerifyMethod("ArgumentType_UnresolvedClass"));
   ASSERT_TRUE(HasClass("LUnresolvedClass;", false));
@@ -654,7 +632,7 @@
 
 TEST_F(VerifierDepsTest, MergeTypes_RegisterLines) {
   ASSERT_TRUE(VerifyMethod("MergeTypes_RegisterLines"));
-  ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "LMySocketTimeoutException;", true));
+  ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "Ljava/net/SocketTimeoutException;", true));
   ASSERT_TRUE(HasAssignable(
       "Ljava/lang/Exception;", "Ljava/util/concurrent/TimeoutException;", true));
 }
@@ -714,11 +692,6 @@
   ASSERT_TRUE(HasClass("LUnresolvedClass;", false));
 }
 
-TEST_F(VerifierDepsTest, NewArray_Resolved) {
-  ASSERT_TRUE(VerifyMethod("NewArray_Resolved"));
-  ASSERT_TRUE(HasClass("[Ljava/lang/IllegalStateException;", true, "public final abstract"));
-}
-
 TEST_F(VerifierDepsTest, NewArray_Unresolved) {
   ASSERT_TRUE(VerifyMethod("NewArray_Unresolved"));
   ASSERT_TRUE(HasClass("[LUnresolvedClass;", false));
@@ -841,7 +814,7 @@
                        "public",
                        "Ljava/io/InterruptedIOException;"));
   ASSERT_TRUE(HasAssignable(
-      "Ljava/io/InterruptedIOException;", "LMySocketTimeoutException;", true));
+      "Ljava/io/InterruptedIOException;", "Ljava/net/SocketTimeoutException;", true));
 }
 
 TEST_F(VerifierDepsTest, InstanceField_Resolved_DeclaredInSuperclass1) {
@@ -854,7 +827,7 @@
                        "public",
                        "Ljava/io/InterruptedIOException;"));
   ASSERT_TRUE(HasAssignable(
-      "Ljava/io/InterruptedIOException;", "LMySocketTimeoutException;", true));
+      "Ljava/io/InterruptedIOException;", "Ljava/net/SocketTimeoutException;", true));
 }
 
 TEST_F(VerifierDepsTest, InstanceField_Resolved_DeclaredInSuperclass2) {
@@ -866,7 +839,7 @@
                        "public",
                        "Ljava/io/InterruptedIOException;"));
   ASSERT_TRUE(HasAssignable(
-      "Ljava/io/InterruptedIOException;", "LMySocketTimeoutException;", true));
+      "Ljava/io/InterruptedIOException;", "Ljava/net/SocketTimeoutException;", true));
 }
 
 TEST_F(VerifierDepsTest, InstanceField_Unresolved_ReferrerInBoot) {
@@ -995,7 +968,7 @@
                         "public",
                         "Ljava/lang/Throwable;"));
   // Type dependency on `this` argument.
-  ASSERT_TRUE(HasAssignable("Ljava/lang/Throwable;", "LMySocketTimeoutException;", true));
+  ASSERT_TRUE(HasAssignable("Ljava/lang/Throwable;", "Ljava/net/SocketTimeoutException;", true));
 }
 
 TEST_F(VerifierDepsTest, InvokeVirtual_Resolved_DeclaredInSuperclass1) {
@@ -1009,7 +982,7 @@
                         "public",
                         "Ljava/lang/Throwable;"));
   // Type dependency on `this` argument.
-  ASSERT_TRUE(HasAssignable("Ljava/lang/Throwable;", "LMySocketTimeoutException;", true));
+  ASSERT_TRUE(HasAssignable("Ljava/lang/Throwable;", "Ljava/net/SocketTimeoutException;", true));
 }
 
 TEST_F(VerifierDepsTest, InvokeVirtual_Resolved_DeclaredInSuperclass2) {
@@ -1123,7 +1096,7 @@
 TEST_F(VerifierDepsTest, InvokeSuper_ThisNotAssignable) {
   ASSERT_FALSE(VerifyMethod("InvokeSuper_ThisNotAssignable"));
   ASSERT_TRUE(HasClass("Ljava/lang/Integer;", true, "public final"));
-  ASSERT_TRUE(HasAssignable("Ljava/lang/Integer;", "LMain;", false));
+  ASSERT_TRUE(HasAssignable("Ljava/lang/Integer;", "Ljava/lang/Thread;", false));
   ASSERT_TRUE(HasMethod(
       "virtual", "Ljava/lang/Integer;", "intValue", "()I", true, "public", "Ljava/lang/Integer;"));
 }
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 2346635..ece81e3 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1295,17 +1295,23 @@
 
         DCHECK_EQ(output_vdex_fd_, -1);
         std::string vdex_filename = ReplaceFileExtension(oat_filename, "vdex");
-        std::unique_ptr<File> vdex_file(OS::CreateEmptyFile(vdex_filename.c_str()));
-        if (vdex_file.get() == nullptr) {
-          PLOG(ERROR) << "Failed to open vdex file: " << vdex_filename;
-          return false;
+        if (vdex_filename == input_vdex_) {
+          update_input_vdex_ = true;
+          std::unique_ptr<File> vdex_file(OS::OpenFileReadWrite(vdex_filename.c_str()));
+          vdex_files_.push_back(std::move(vdex_file));
+        } else {
+          std::unique_ptr<File> vdex_file(OS::CreateEmptyFile(vdex_filename.c_str()));
+          if (vdex_file.get() == nullptr) {
+            PLOG(ERROR) << "Failed to open vdex file: " << vdex_filename;
+            return false;
+          }
+          if (fchmod(vdex_file->Fd(), 0644) != 0) {
+            PLOG(ERROR) << "Failed to make vdex file world readable: " << vdex_filename;
+            vdex_file->Erase();
+            return false;
+          }
+          vdex_files_.push_back(std::move(vdex_file));
         }
-        if (fchmod(vdex_file->Fd(), 0644) != 0) {
-          PLOG(ERROR) << "Failed to make vdex file world readable: " << vdex_filename;
-          vdex_file->Erase();
-          return false;
-        }
-        vdex_files_.push_back(std::move(vdex_file));
       }
     } else {
       std::unique_ptr<File> oat_file(new File(oat_fd_, oat_location_, /* check_usage */ true));
@@ -1319,7 +1325,6 @@
       }
       oat_files_.push_back(std::move(oat_file));
 
-      DCHECK_NE(input_vdex_fd_, output_vdex_fd_);
       if (input_vdex_fd_ != -1) {
         struct stat s;
         int rc = TEMP_FAILURE_RETRY(fstat(input_vdex_fd_, &s));
@@ -1352,8 +1357,13 @@
         return false;
       }
       vdex_file->DisableAutoClose();
-      if (vdex_file->SetLength(0) != 0) {
-        PLOG(WARNING) << "Truncating vdex file " << vdex_location << " failed.";
+      if (input_vdex_file_ != nullptr && output_vdex_fd_ == input_vdex_fd_) {
+        update_input_vdex_ = true;
+      } else {
+        if (vdex_file->SetLength(0) != 0) {
+          PLOG(ERROR) << "Truncating vdex file " << vdex_location << " failed.";
+          return false;
+        }
       }
       vdex_files_.push_back(std::move(vdex_file));
 
@@ -1542,6 +1552,7 @@
             instruction_set_features_.get(),
             key_value_store_.get(),
             verify,
+            update_input_vdex_,
             &opened_dex_files_map,
             &opened_dex_files)) {
           return false;
@@ -2732,6 +2743,9 @@
   // See CompilerOptions.force_determinism_.
   bool force_determinism_;
 
+  // Whether the given input vdex is also the output.
+  bool update_input_vdex_ = false;
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(Dex2Oat);
 };
 
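The dex2oat change above handles the case where the computed output vdex path is the very file that was passed as the input vdex: instead of creating (and thereby truncating) a new file, the existing vdex is reopened read-write and update_input_vdex_ records that the writer must patch it in place; the fd-based path below makes the same decision when the input and output fds match. A minimal, hypothetical sketch of that decision (stand-in names and paths, not dex2oat's real helpers):

#include <iostream>
#include <string>

// Which way the vdex output is opened.
enum class VdexOutputMode { kUpdateInputInPlace, kCreateNewFile };

// Hypothetical helper mirroring the added branch: equal paths mean the input vdex is
// updated in place (opened read-write); otherwise a fresh world-readable file is created.
VdexOutputMode ChooseVdexOutputMode(const std::string& vdex_filename,
                                    const std::string& input_vdex) {
  return vdex_filename == input_vdex ? VdexOutputMode::kUpdateInputInPlace
                                     : VdexOutputMode::kCreateNewFile;
}

int main() {
  // In-place dexopt update: the same file is both input and output, so it must not be truncated.
  bool in_place = ChooseVdexOutputMode("/data/app/base.vdex", "/data/app/base.vdex") ==
                  VdexOutputMode::kUpdateInputInPlace;
  std::cout << std::boolalpha << in_place << std::endl;  // true
  return 0;
}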
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index c82600b..1f6b874 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -139,6 +139,8 @@
   { kSpecial0Mask | (0x1f << 16) | 0x7ff, (0x01 << 6) | 0x11, "clo", "DS" },
   { kSpecial0Mask | (0x1f << 16) | 0x7ff, (0x01 << 6) | 0x12, "dclz", "DS" },
   { kSpecial0Mask | (0x1f << 16) | 0x7ff, (0x01 << 6) | 0x13, "dclo", "DS" },
+  { kSpecial0Mask | 0x73f, 0x05, "lsa", "DSTj" },
+  { kSpecial0Mask | 0x73f, 0x15, "dlsa", "DSTj" },
   // TODO: sdbbp
 
   // SPECIAL2
@@ -490,6 +492,9 @@
           case 'i':  // Sign-extended lower 16-bit immediate.
             args << static_cast<int16_t>(instruction & 0xffff);
             break;
+          case 'j':  // sa value for lsa/dlsa.
+            args << (sa + 1);
+            break;
           case 'L':  // Jump label.
             {
               // TODO: is this right?
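The new Lsa/Dlsa assembler entry points, their tests, and the disassembler's new 'j' argument (which prints sa + 1) all reflect the R6 encoding in which the 2-bit sa field stores the shift amount minus one, so the effective shift is 1..4 and the API takes `saPlusOne` directly. A small self-contained sketch of the assumed arithmetic (hypothetical helpers, not ART code):

#include <cassert>
#include <cstdint>

// lsa/dlsa compute rd = (rs << shift) + rt with an effective shift of 1..4; the
// instruction's sa field stores shift - 1, hence the `saPlusOne` parameter above.
uint32_t LsaValue(uint32_t rs, uint32_t rt, int sa_plus_one) {
  return (rs << sa_plus_one) + rt;  // sa_plus_one must be in [1, 4].
}

uint64_t DlsaValue(uint64_t rs, uint64_t rt, int sa_plus_one) {
  return (rs << sa_plus_one) + rt;  // 64-bit (MIPS64) variant.
}

int main() {
  // Typical use: scale an index by an element size and add a base address.
  assert(LsaValue(/* index */ 5, /* base */ 0x1000, /* shift */ 2) == 0x1000 + (5u << 2));
  assert(DlsaValue(3, 0x2000, 3) == 0x2000 + (3ull << 3));
  return 0;
}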
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 96976d9..ef03bb3 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -134,7 +134,8 @@
   // NOTE: Unchecked, i.e. not throwing AIOOB. We don't even know the length here
   // without accessing the DexCache and we don't want to do that in release build.
   DCHECK_LT(method_index,
-            GetInterfaceMethodIfProxy(pointer_size)->GetDexCache()->NumResolvedMethods());
+            GetInterfaceMethodIfProxy(pointer_size)->GetDeclaringClass()
+                ->GetDexCache()->NumResolvedMethods());
   ArtMethod* method = mirror::DexCache::GetElementPtrSize(GetDexCacheResolvedMethods(pointer_size),
                                                           method_index,
                                                           pointer_size);
@@ -153,7 +154,8 @@
   // NOTE: Unchecked, i.e. not throwing AIOOB. We don't even know the length here
   // without accessing the DexCache and we don't want to do that in release build.
   DCHECK_LT(method_index,
-            GetInterfaceMethodIfProxy(pointer_size)->GetDexCache()->NumResolvedMethods());
+            GetInterfaceMethodIfProxy(pointer_size)->GetDeclaringClass()
+                ->GetDexCache()->NumResolvedMethods());
   DCHECK(new_method == nullptr || new_method->GetDeclaringClass() != nullptr);
   mirror::DexCache::SetElementPtrSize(GetDexCacheResolvedMethods(pointer_size),
                                       method_index,
@@ -184,7 +186,8 @@
 inline mirror::Class* ArtMethod::GetDexCacheResolvedType(dex::TypeIndex type_index,
                                                          PointerSize pointer_size) {
   if (kWithCheck) {
-    mirror::DexCache* dex_cache = GetInterfaceMethodIfProxy(pointer_size)->GetDexCache();
+    mirror::DexCache* dex_cache =
+        GetInterfaceMethodIfProxy(pointer_size)->GetDeclaringClass()->GetDexCache();
     if (UNLIKELY(type_index.index_ >= dex_cache->NumResolvedTypes())) {
       ThrowArrayIndexOutOfBoundsException(type_index.index_, dex_cache->NumResolvedTypes());
       return nullptr;
@@ -330,7 +333,7 @@
 }
 
 inline const DexFile::CodeItem* ArtMethod::GetCodeItem() {
-  return GetDexFile()->GetCodeItem(GetCodeItemOffset());
+  return GetDeclaringClass()->GetDexFile().GetCodeItem(GetCodeItemOffset());
 }
 
 inline bool ArtMethod::IsResolvedTypeIdx(dex::TypeIndex type_idx, PointerSize pointer_size) {
@@ -395,11 +398,11 @@
 }
 
 inline mirror::DexCache* ArtMethod::GetDexCache() {
-  if (LIKELY(!IsObsolete())) {
-    return GetDeclaringClass()->GetDexCache();
-  } else {
-    DCHECK(!IsProxyMethod());
+  DCHECK(!IsProxyMethod());
+  if (UNLIKELY(IsObsolete())) {
     return GetObsoleteDexCache();
+  } else {
+    return GetDeclaringClass()->GetDexCache();
   }
 }
 
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 963a541..3bc6f5d 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -27,7 +27,6 @@
 #include "invoke_type.h"
 #include "method_reference.h"
 #include "modifiers.h"
-#include "mirror/dex_cache.h"
 #include "mirror/object.h"
 #include "obj_ptr.h"
 #include "read_barrier_option.h"
@@ -221,12 +220,6 @@
     return !IsIntrinsic() && (GetAccessFlags() & kAccObsoleteMethod) != 0;
   }
 
-  void SetIsObsolete() {
-    // TODO We should really support redefining intrinsic if possible.
-    DCHECK(!IsIntrinsic());
-    SetAccessFlags(GetAccessFlags() | kAccObsoleteMethod);
-  }
-
   template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   bool IsNative() {
     return (GetAccessFlags<kReadBarrierOption>() & kAccNative) != 0;
@@ -333,7 +326,6 @@
   ALWAYS_INLINE ArtMethod* GetDexCacheResolvedMethod(uint16_t method_index,
                                                      PointerSize pointer_size)
       REQUIRES_SHARED(Locks::mutator_lock_);
-
   ALWAYS_INLINE void SetDexCacheResolvedMethod(uint16_t method_index,
                                                ArtMethod* new_method,
                                                PointerSize pointer_size)
diff --git a/runtime/base/hash_set.h b/runtime/base/hash_set.h
index f24a862..a22efcf 100644
--- a/runtime/base/hash_set.h
+++ b/runtime/base/hash_set.h
@@ -672,6 +672,8 @@
   T* data_;  // Backing storage.
   double min_load_factor_;
   double max_load_factor_;
+
+  ART_FRIEND_TEST(InternTableTest, CrossHash);
 };
 
 template <class T, class EmptyFn, class HashFn, class Pred, class Alloc>
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index a11257f..cdcc84d 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -81,9 +81,6 @@
     Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
     const DexFile& dex_file = *dex_cache->GetDexFile();
     string = ResolveString(dex_file, string_idx, dex_cache);
-    if (string != nullptr) {
-      DCHECK_EQ(dex_cache->GetResolvedString(string_idx), string);
-    }
   }
   return string.Ptr();
 }
diff --git a/runtime/compiler_filter.cc b/runtime/compiler_filter.cc
index 6e3e1d8..dc89d32 100644
--- a/runtime/compiler_filter.cc
+++ b/runtime/compiler_filter.cc
@@ -60,6 +60,26 @@
   UNREACHABLE();
 }
 
+bool CompilerFilter::IsAnyMethodCompilationEnabled(Filter filter) {
+  switch (filter) {
+    case CompilerFilter::kVerifyNone:
+    case CompilerFilter::kVerifyAtRuntime:
+    case CompilerFilter::kVerifyProfile: return false;
+
+    case CompilerFilter::kInterpretOnly:
+    case CompilerFilter::kSpaceProfile:
+    case CompilerFilter::kSpace:
+    case CompilerFilter::kBalanced:
+    case CompilerFilter::kTime:
+    case CompilerFilter::kSpeedProfile:
+    case CompilerFilter::kSpeed:
+    case CompilerFilter::kLayoutProfile:
+    case CompilerFilter::kEverythingProfile:
+    case CompilerFilter::kEverything: return true;
+  }
+  UNREACHABLE();
+}
+
 bool CompilerFilter::IsVerificationEnabled(Filter filter) {
   switch (filter) {
     case CompilerFilter::kVerifyNone:
diff --git a/runtime/compiler_filter.h b/runtime/compiler_filter.h
index 781d43a..7eb5f9a 100644
--- a/runtime/compiler_filter.h
+++ b/runtime/compiler_filter.h
@@ -52,6 +52,11 @@
   static bool IsBytecodeCompilationEnabled(Filter filter);
 
   // Returns true if an oat file with this compiler filter contains
+  // compiled executable code for bytecode, JNI methods, or quickened dex
+  // bytecode.
+  static bool IsAnyMethodCompilationEnabled(Filter filter);
+
+  // Returns true if an oat file with this compiler filter contains
   // compiled executable code for JNI methods.
   static bool IsJniCompilationEnabled(Filter filter);
 
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 14c9c21..f6eeffc 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -563,7 +563,7 @@
       HandleWrapperObjPtr<mirror::Object> h_this(hs2.NewHandleWrapper(this_object));
       Handle<mirror::Class> h_referring_class(hs2.NewHandle(referrer->GetDeclaringClass()));
       const dex::TypeIndex method_type_idx =
-          referrer->GetDexFile()->GetMethodId(method_idx).class_idx_;
+          h_referring_class->GetDexFile().GetMethodId(method_idx).class_idx_;
       mirror::Class* method_reference_class = class_linker->ResolveType(method_type_idx, referrer);
       if (UNLIKELY(method_reference_class == nullptr)) {
         // Bad type idx.
@@ -673,7 +673,8 @@
                                size_t expected_size) {
   ScopedAssertNoThreadSuspension ants(__FUNCTION__);
   ArtField* resolved_field =
-      referrer->GetDexCache()->GetResolvedField(field_idx, kRuntimePointerSize);
+      referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx,
+                                                                     kRuntimePointerSize);
   if (UNLIKELY(resolved_field == nullptr)) {
     return nullptr;
   }
@@ -732,7 +733,7 @@
   }
   mirror::Class* referring_class = referrer->GetDeclaringClass();
   ArtMethod* resolved_method =
-      referrer->GetDexCache()->GetResolvedMethod(method_idx, kRuntimePointerSize);
+      referring_class->GetDexCache()->GetResolvedMethod(method_idx, kRuntimePointerSize);
   if (UNLIKELY(resolved_method == nullptr)) {
     return nullptr;
   }
@@ -758,9 +759,9 @@
   } else if (type == kSuper) {
     // TODO This lookup is rather slow.
     dex::TypeIndex method_type_idx =
-        referrer->GetDexFile()->GetMethodId(method_idx).class_idx_;
+        referring_class->GetDexFile().GetMethodId(method_idx).class_idx_;
     mirror::Class* method_reference_class =
-        referrer->GetDexCache()->GetResolvedType(method_type_idx);
+        referring_class->GetDexCache()->GetResolvedType(method_type_idx);
     if (method_reference_class == nullptr) {
       // Need to do full type resolution...
       return nullptr;
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 4ea1130..03ef962 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -557,10 +557,8 @@
 }
 
 Instrumentation::InstrumentationLevel Instrumentation::GetCurrentInstrumentationLevel() const {
-  if (interpreter_stubs_installed_ && interpret_only_) {
+  if (interpreter_stubs_installed_) {
     return InstrumentationLevel::kInstrumentWithInterpreter;
-  } else if (interpreter_stubs_installed_) {
-    return InstrumentationLevel::kInstrumentWithInterpreterAndJit;
   } else if (entry_exit_stubs_installed_) {
     return InstrumentationLevel::kInstrumentWithInstrumentationStubs;
   } else {
@@ -568,14 +566,6 @@
   }
 }
 
-bool Instrumentation::RequiresInstrumentationInstallation(InstrumentationLevel new_level) const {
-  // We need to reinstall instrumentation if we go to a different level or if the current level is
-  // kInstrumentWithInterpreterAndJit since that level does not force all code to always use the
-  // interpreter and so we might have started running optimized code again.
-  return new_level == InstrumentationLevel::kInstrumentWithInterpreterAndJit ||
-      GetCurrentInstrumentationLevel() != new_level;
-}
-
 void Instrumentation::ConfigureStubs(const char* key, InstrumentationLevel desired_level) {
   // Store the instrumentation level for this key or remove it.
   if (desired_level == InstrumentationLevel::kInstrumentNothing) {
@@ -595,7 +585,8 @@
   interpret_only_ = (requested_level == InstrumentationLevel::kInstrumentWithInterpreter) ||
                     forced_interpret_only_;
 
-  if (!RequiresInstrumentationInstallation(requested_level)) {
+  InstrumentationLevel current_level = GetCurrentInstrumentationLevel();
+  if (requested_level == current_level) {
     // We're already set.
     return;
   }
@@ -604,7 +595,7 @@
   Locks::mutator_lock_->AssertExclusiveHeld(self);
   Locks::thread_list_lock_->AssertNotHeld(self);
   if (requested_level > InstrumentationLevel::kInstrumentNothing) {
-    if (requested_level >= InstrumentationLevel::kInstrumentWithInterpreterAndJit) {
+    if (requested_level == InstrumentationLevel::kInstrumentWithInterpreter) {
       interpreter_stubs_installed_ = true;
       entry_exit_stubs_installed_ = true;
     } else {
@@ -851,8 +842,7 @@
 void Instrumentation::DisableDeoptimization(const char* key) {
   CHECK_EQ(deoptimization_enabled_, true);
   // If we deoptimized everything, undo it.
-  InstrumentationLevel level = GetCurrentInstrumentationLevel();
-  if (level == InstrumentationLevel::kInstrumentWithInterpreter) {
+  if (interpreter_stubs_installed_) {
     UndeoptimizeEverything(key);
   }
   // Undeoptimized selected methods.
@@ -879,14 +869,6 @@
   return !deoptimization_enabled_ && !interpreter_stubs_installed_;
 }
 
-// TODO we don't check deoptimization_enabled_ because currently there isn't really any support for
-// multiple users of instrumentation. Since this is just a temporary state anyway pending work to
-// ensure that the current_method doesn't get kept across suspend points this should be okay.
-// TODO Remove once b/33630159 is resolved.
-void Instrumentation::ReJitEverything(const char* key) {
-  ConfigureStubs(key, InstrumentationLevel::kInstrumentWithInterpreterAndJit);
-}
-
 void Instrumentation::DeoptimizeEverything(const char* key) {
   CHECK(deoptimization_enabled_);
   ConfigureStubs(key, InstrumentationLevel::kInstrumentWithInterpreter);
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 872efac..1e5fcf2 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -133,9 +133,6 @@
   enum class InstrumentationLevel {
     kInstrumentNothing,                   // execute without instrumentation
     kInstrumentWithInstrumentationStubs,  // execute with instrumentation entry/exit stubs
-    kInstrumentWithInterpreterAndJit,     // execute with interpreter initially and later the JIT
-                                          // (if it is enabled). This level is special in that it
-                                          // always requires re-instrumentation.
     kInstrumentWithInterpreter            // execute with interpreter
   };
 
@@ -166,13 +163,6 @@
   }
   bool ShouldNotifyMethodEnterExitEvents() const REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Executes everything with the interpreter/jit (if available).
-  void ReJitEverything(const char* key)
-      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
-      REQUIRES(!Locks::thread_list_lock_,
-               !Locks::classlinker_classes_lock_,
-               !deoptimized_methods_lock_);
-
   // Executes everything with interpreter.
   void DeoptimizeEverything(const char* key)
       REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
@@ -445,10 +435,6 @@
  private:
   InstrumentationLevel GetCurrentInstrumentationLevel() const;
 
-  // Returns true if moving to the given instrumentation level requires the installation of stubs.
-  // False otherwise.
-  bool RequiresInstrumentationInstallation(InstrumentationLevel new_level) const;
-
   // Does the job of installing or removing instrumentation code within methods.
   // In order to support multiple clients using instrumentation at the same time,
   // the caller must pass a unique key (a string) identifying it so we remind which
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 9c05d3c..3e19146 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -319,7 +319,9 @@
   if (kIsDebugBuild) {
     Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
   }
-  return static_cast<size_t>(root.Read<kWithoutReadBarrier>()->GetHashCode());
+  // An additional cast to prevent undesired sign extension.
+  return static_cast<size_t>(
+      static_cast<uint32_t>(root.Read<kWithoutReadBarrier>()->GetHashCode()));
 }
 
 bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index f661d9f..68454fb 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -163,7 +163,11 @@
         NO_THREAD_SAFETY_ANALYSIS;
 
     // Utf8String can be used for lookup.
-    std::size_t operator()(const Utf8String& key) const { return key.GetHash(); }
+    std::size_t operator()(const Utf8String& key) const {
+      // A cast to prevent undesired sign extension.
+      return static_cast<uint32_t>(key.GetHash());
+    }
+
     bool operator()(const GcRoot<mirror::String>& a, const Utf8String& b) const
         NO_THREAD_SAFETY_ANALYSIS;
   };
@@ -217,6 +221,8 @@
     // We call AddNewTable when we create the zygote to reduce private dirty pages caused by
     // modifying the zygote intern table. The back of table is modified when strings are interned.
     std::vector<UnorderedSet> tables_;
+
+    ART_FRIEND_TEST(InternTableTest, CrossHash);
   };
 
   // Insert if non null, otherwise return null. Must be called holding the mutator lock.
@@ -276,6 +282,7 @@
   gc::WeakRootState weak_root_state_ GUARDED_BY(Locks::intern_table_lock_);
 
   friend class Transaction;
+  ART_FRIEND_TEST(InternTableTest, CrossHash);
   DISALLOW_COPY_AND_ASSIGN(InternTable);
 };
 
diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc
index b91d946..3991d65 100644
--- a/runtime/intern_table_test.cc
+++ b/runtime/intern_table_test.cc
@@ -16,6 +16,7 @@
 
 #include "intern_table.h"
 
+#include "base/hash_set.h"
 #include "common_runtime_test.h"
 #include "mirror/object.h"
 #include "handle_scope-inl.h"
@@ -62,6 +63,25 @@
   EXPECT_EQ(2U, t.Size());
 }
 
+// Check that table indexes match on 64-bit and 32-bit machines.
+// This is done by ensuring that hash values are identical on every machine and fit in 32 bits.
+// Otherwise, cross compilation could fill a table on the host using one indexing algorithm,
+// while a device with a different sizeof(size_t) later reads it with another indexing
+// algorithm, and the table would return wrong data.
+TEST_F(InternTableTest, CrossHash) {
+  ScopedObjectAccess soa(Thread::Current());
+  InternTable t;
+
+  // A string that has a negative hash value.
+  GcRoot<mirror::String> str(mirror::String::AllocFromModifiedUtf8(soa.Self(), "00000000"));
+
+  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
+  for (InternTable::Table::UnorderedSet& table : t.strong_interns_.tables_) {
+    // The negative hash value must fit in 32 bits on every host.
+    ASSERT_TRUE(IsUint<32>(table.hashfn_(str)));
+  }
+}
+
 class TestPredicate : public IsMarkedVisitor {
  public:
   mirror::Object* IsMarked(mirror::Object* s) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
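Both hash-function changes above narrow the int32_t hash to uint32_t before it is widened to size_t, so a negative String hash code yields the same table index on 32-bit and 64-bit hosts; the CrossHash test then checks that every stored hash fits in 32 bits. A minimal standalone illustration of the sign-extension hazard (not ART code):

#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  int32_t hash = -1;  // A hash code that happens to be negative, like the test string above.
  // Direct widening sign-extends when size_t is 64 bits wide, so 32-bit and 64-bit builds
  // would compute different bucket indices from the same hash.
  size_t sign_extended = static_cast<size_t>(hash);
  // The extra uint32_t cast keeps the value within 32 bits on every host.
  size_t zero_extended = static_cast<size_t>(static_cast<uint32_t>(hash));
  assert(zero_extended == 0xFFFFFFFFu);
  (void)sign_extended;  // Equals SIZE_MAX on a 64-bit host, 0xFFFFFFFF on a 32-bit one.
  return 0;
}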
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index b599949..423f054 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -251,16 +251,17 @@
     }
   }
   ArtMethod* method = shadow_frame.GetMethod();
+  ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
   // MethodVerifier refuses methods with string_idx out of bounds.
   DCHECK_LT(string_idx.index_ % mirror::DexCache::kDexCacheStringCacheSize,
-            method->GetDexFile()->NumStringIds());
+            declaring_class->GetDexFile().NumStringIds());
   ObjPtr<mirror::String> string_ptr =
-      mirror::StringDexCachePair::Lookup(method->GetDexCache()->GetStrings(),
+      mirror::StringDexCachePair::Lookup(declaring_class->GetDexCache()->GetStrings(),
                                          string_idx.index_,
                                          mirror::DexCache::kDexCacheStringCacheSize).Read();
   if (UNLIKELY(string_ptr == nullptr)) {
     StackHandleScope<1> hs(self);
-    Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
+    Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
     string_ptr = Runtime::Current()->GetClassLinker()->ResolveString(*method->GetDexFile(),
                                                                      string_idx,
                                                                      dex_cache);
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 6336cdd..f43e30d 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -594,9 +594,6 @@
       VLOG(jit) << "JIT discarded jitted code due to invalid single-implementation assumptions.";
       return nullptr;
     }
-    DCHECK(cha_single_implementation_list.empty() || !Runtime::Current()->IsDebuggable())
-        << "Should not be using cha on debuggable apps/runs!";
-
     for (ArtMethod* single_impl : cha_single_implementation_list) {
       Runtime::Current()->GetClassHierarchyAnalysis()->AddDependency(
           single_impl, method, method_header);
@@ -648,69 +645,6 @@
   return CodeCacheSizeLocked();
 }
 
-// This notifies the code cache that the given method has been redefined and that it should remove
-// any cached information it has on the method. All threads must be suspended before calling this
-// method. The compiled code for the method (if there is any) must not be in any threads call stack.
-void JitCodeCache::NotifyMethodRedefined(ArtMethod* method) {
-  MutexLock mu(Thread::Current(), lock_);
-  if (method->IsNative()) {
-    return;
-  }
-  ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
-  if (info != nullptr) {
-    auto profile = std::find(profiling_infos_.begin(), profiling_infos_.end(), info);
-    DCHECK(profile != profiling_infos_.end());
-    profiling_infos_.erase(profile);
-  }
-  method->SetProfilingInfo(nullptr);
-  ScopedCodeCacheWrite ccw(code_map_.get());
-  for (auto code_iter = method_code_map_.begin();
-       code_iter != method_code_map_.end();
-       ++code_iter) {
-    if (code_iter->second == method) {
-      FreeCode(code_iter->first);
-      method_code_map_.erase(code_iter);
-    }
-  }
-  auto code_map = osr_code_map_.find(method);
-  if (code_map != osr_code_map_.end()) {
-    osr_code_map_.erase(code_map);
-  }
-}
-
-// This invalidates old_method. Once this function returns one can no longer use old_method to
-// execute code unless it is fixed up. This fixup will happen later in the process of installing a
-// class redefinition.
-// TODO We should add some info to ArtMethod to note that 'old_method' has been invalidated and
-// shouldn't be used since it is no longer logically in the jit code cache.
-// TODO We should add DCHECKS that validate that the JIT is paused when this method is entered.
-void JitCodeCache::MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method) {
-  MutexLock mu(Thread::Current(), lock_);
-  // Update ProfilingInfo to the new one and remove it from the old_method.
-  if (old_method->GetProfilingInfo(kRuntimePointerSize) != nullptr) {
-    DCHECK_EQ(old_method->GetProfilingInfo(kRuntimePointerSize)->GetMethod(), old_method);
-    ProfilingInfo* info = old_method->GetProfilingInfo(kRuntimePointerSize);
-    old_method->SetProfilingInfo(nullptr);
-    // Since the JIT should be paused and all threads suspended by the time this is called these
-    // checks should always pass.
-    DCHECK(!info->IsInUseByCompiler());
-    new_method->SetProfilingInfo(info);
-    info->method_ = new_method;
-  }
-  // Update method_code_map_ to point to the new method.
-  for (auto& it : method_code_map_) {
-    if (it.second == old_method) {
-      it.second = new_method;
-    }
-  }
-  // Update osr_code_map_ to point to the new method.
-  auto code_map = osr_code_map_.find(old_method);
-  if (code_map != osr_code_map_.end()) {
-    osr_code_map_.Put(new_method, code_map->second);
-    osr_code_map_.erase(old_method);
-  }
-}
-
 size_t JitCodeCache::CodeCacheSizeLocked() {
   return used_memory_for_code_;
 }
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index b5e3176..d97742d 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -75,10 +75,6 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!lock_);
 
-  void NotifyMethodRedefined(ArtMethod* method)
-      REQUIRES(Locks::mutator_lock_)
-      REQUIRES(!lock_);
-
   // Notify to the code cache that the compiler wants to use the
   // profiling info of `method` to drive optimizations,
   // and therefore ensure the returned profiling info object is not
@@ -223,11 +219,6 @@
   void DisallowInlineCacheAccess() REQUIRES(!lock_);
   void BroadcastForInlineCacheAccess() REQUIRES(!lock_);
 
-  // Notify the code cache that the method at the pointer 'old_method' is being moved to the pointer
-  // 'new_method' since it is being made obsolete.
-  void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method)
-      REQUIRES(!lock_) REQUIRES(Locks::mutator_lock_);
-
  private:
   // Take ownership of maps.
   JitCodeCache(MemMap* code_map,
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index 9fbf2e3..9902bb5 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -128,9 +128,7 @@
   const uint32_t number_of_inline_caches_;
 
   // Method this profiling info is for.
-  // Not 'const' as JVMTI introduces obsolete methods that we implement by creating new ArtMethods.
-  // See JitCodeCache::MoveObsoleteMethod.
-  ArtMethod* method_;
+  ArtMethod* const method_;
 
   // Whether the ArtMethod is currently being compiled. This flag
   // is implicitly guarded by the JIT code cache lock.
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 6f88cc5..ec265e5 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -19,6 +19,7 @@
 
 #include "array.h"
 #include "art_field.h"
+#include "art_method.h"
 #include "class.h"
 #include "dex_file_types.h"
 #include "object.h"
@@ -26,7 +27,6 @@
 
 namespace art {
 
-class ArtMethod;
 struct DexCacheOffsets;
 class DexFile;
 class ImageWriter;
diff --git a/runtime/openjdkjvmti/ti_redefine.cc b/runtime/openjdkjvmti/ti_redefine.cc
index 57cc938..68815e7 100644
--- a/runtime/openjdkjvmti/ti_redefine.cc
+++ b/runtime/openjdkjvmti/ti_redefine.cc
@@ -40,8 +40,6 @@
 #include "events-inl.h"
 #include "gc/allocation_listener.h"
 #include "instrumentation.h"
-#include "jit/jit.h"
-#include "jit/jit_code_cache.h"
 #include "jni_env_ext-inl.h"
 #include "jvmti_allocator.h"
 #include "mirror/class.h"
@@ -55,143 +53,6 @@
 
 using android::base::StringPrintf;
 
-// This visitor walks thread stacks and allocates and sets up the obsolete methods. It also does
-// some basic sanity checks that the obsolete method is sane.
-class ObsoleteMethodStackVisitor : public art::StackVisitor {
- protected:
-  ObsoleteMethodStackVisitor(
-      art::Thread* thread,
-      art::LinearAlloc* allocator,
-      const std::unordered_set<art::ArtMethod*>& obsoleted_methods,
-      /*out*/std::unordered_map<art::ArtMethod*, art::ArtMethod*>* obsolete_maps,
-      /*out*/bool* success,
-      /*out*/std::string* error_msg)
-        : StackVisitor(thread,
-                       /*context*/nullptr,
-                       StackVisitor::StackWalkKind::kIncludeInlinedFrames),
-          allocator_(allocator),
-          obsoleted_methods_(obsoleted_methods),
-          obsolete_maps_(obsolete_maps),
-          success_(success),
-          is_runtime_frame_(false),
-          error_msg_(error_msg) {
-    *success_ = true;
-  }
-
-  ~ObsoleteMethodStackVisitor() OVERRIDE {}
-
- public:
-  // Returns true if we successfully installed obsolete methods on this thread, filling
-  // obsolete_maps_ with the translations if needed. Returns false and fills error_msg if we fail.
-  // The stack is cleaned up when we fail.
-  static bool UpdateObsoleteFrames(
-      art::Thread* thread,
-      art::LinearAlloc* allocator,
-      const std::unordered_set<art::ArtMethod*>& obsoleted_methods,
-      /*out*/std::unordered_map<art::ArtMethod*, art::ArtMethod*>* obsolete_maps,
-      /*out*/std::string* error_msg) REQUIRES(art::Locks::mutator_lock_) {
-    bool success = true;
-    ObsoleteMethodStackVisitor visitor(thread,
-                                       allocator,
-                                       obsoleted_methods,
-                                       obsolete_maps,
-                                       &success,
-                                       error_msg);
-    visitor.WalkStack();
-    if (!success) {
-      RestoreFrames(thread, *obsolete_maps, error_msg);
-      return false;
-    } else {
-      return true;
-    }
-  }
-
-  static void RestoreFrames(
-      art::Thread* thread ATTRIBUTE_UNUSED,
-      const std::unordered_map<art::ArtMethod*, art::ArtMethod*>& obsolete_maps ATTRIBUTE_UNUSED,
-      std::string* error_msg)
-        REQUIRES(art::Locks::mutator_lock_) {
-    LOG(FATAL) << "Restoring stack frames is not yet supported. Error was: " << *error_msg;
-  }
-
-  bool VisitFrame() OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
-    art::ArtMethod* old_method = GetMethod();
-    // TODO REMOVE once either current_method doesn't stick around through suspend points or deopt
-    // works through runtime methods.
-    bool prev_was_runtime_frame_ = is_runtime_frame_;
-    is_runtime_frame_ = old_method->IsRuntimeMethod();
-    if (obsoleted_methods_.find(old_method) != obsoleted_methods_.end()) {
-      // The check below works since when we deoptimize we set shadow frames for all frames until a
-      // native/runtime transition and for those set the return PC to a function that will complete
-      // the deoptimization. This does leave us with the unfortunate side-effect that frames just
-      // below runtime frames cannot be deoptimized at the moment.
-      // TODO REMOVE once either current_method doesn't stick around through suspend points or deopt
-      // works through runtime methods.
-      // TODO b/33616143
-      if (!IsShadowFrame() && prev_was_runtime_frame_) {
-        *error_msg_ = StringPrintf("Deoptimization failed due to runtime method in stack.");
-        *success_ = false;
-        return false;
-      }
-      // We cannot ensure that the right dex file is used in inlined frames so we don't support
-      // redefining them.
-      DCHECK(!IsInInlinedFrame()) << "Inlined frames are not supported when using redefinition";
-      // TODO We should really support intrinsic obsolete methods.
-      // TODO We should really support redefining intrinsics.
-      // We don't support intrinsics so check for them here.
-      DCHECK(!old_method->IsIntrinsic());
-      art::ArtMethod* new_obsolete_method = nullptr;
-      auto obsolete_method_pair = obsolete_maps_->find(old_method);
-      if (obsolete_method_pair == obsolete_maps_->end()) {
-        // Create a new Obsolete Method and put it in the list.
-        art::Runtime* runtime = art::Runtime::Current();
-        art::ClassLinker* cl = runtime->GetClassLinker();
-        auto ptr_size = cl->GetImagePointerSize();
-        const size_t method_size = art::ArtMethod::Size(ptr_size);
-        auto* method_storage = allocator_->Alloc(GetThread(), method_size);
-        if (method_storage == nullptr) {
-          *success_ = false;
-          *error_msg_ = StringPrintf("Unable to allocate storage for obsolete version of '%s'",
-                                     old_method->PrettyMethod().c_str());
-          return false;
-        }
-        new_obsolete_method = new (method_storage) art::ArtMethod();
-        new_obsolete_method->CopyFrom(old_method, ptr_size);
-        DCHECK_EQ(new_obsolete_method->GetDeclaringClass(), old_method->GetDeclaringClass());
-        new_obsolete_method->SetIsObsolete();
-        obsolete_maps_->insert({old_method, new_obsolete_method});
-        // Update JIT Data structures to point to the new method.
-        art::jit::Jit* jit = art::Runtime::Current()->GetJit();
-        if (jit != nullptr) {
-          // Notify the JIT we are making this obsolete method. It will update the jit's internal
-          // structures to keep track of the new obsolete method.
-          jit->GetCodeCache()->MoveObsoleteMethod(old_method, new_obsolete_method);
-        }
-      } else {
-        new_obsolete_method = obsolete_method_pair->second;
-      }
-      DCHECK(new_obsolete_method != nullptr);
-      SetMethod(new_obsolete_method);
-    }
-    return true;
-  }
-
- private:
-  // The linear allocator we should use to make new methods.
-  art::LinearAlloc* allocator_;
-  // The set of all methods which could be obsoleted.
-  const std::unordered_set<art::ArtMethod*>& obsoleted_methods_;
-  // A map from the original to the newly allocated obsolete method for frames on this thread. The
-  // values in this map must be added to the obsolete_methods_ (and obsolete_dex_caches_) fields of
-  // the redefined classes ClassExt by the caller.
-  std::unordered_map<art::ArtMethod*, art::ArtMethod*>* obsolete_maps_;
-  bool* success_;
-  // TODO REMOVE once either current_method doesn't stick around through suspend points or deopt
-  // works through runtime methods.
-  bool is_runtime_frame_;
-  std::string* error_msg_;
-};
-
 // Moves dex data to an anonymous, read-only mmap'd region.
 std::unique_ptr<art::MemMap> Redefiner::MoveDataToMemMap(const std::string& original_location,
                                                          jint data_len,
@@ -215,8 +76,6 @@
   return map;
 }
 
-// TODO This should handle doing multiple classes at once so we need to do less cleanup when things
-// go wrong.
 jvmtiError Redefiner::RedefineClass(ArtJvmTiEnv* env,
                                     art::Runtime* runtime,
                                     art::Thread* self,
@@ -257,9 +116,6 @@
     *error_msg = os.str();
     return ERR(INVALID_CLASS_FORMAT);
   }
-  // Stop JIT for the duration of this redefine since the JIT might concurrently compile a method we
-  // are going to redefine.
-  art::jit::ScopedJitSuspend suspend_jit;
   // Get shared mutator lock.
   art::ScopedObjectAccess soa(self);
   art::StackHandleScope<1> hs(self);
@@ -440,107 +296,6 @@
   return true;
 }
 
-struct CallbackCtx {
-  Redefiner* const r;
-  art::LinearAlloc* allocator;
-  std::unordered_map<art::ArtMethod*, art::ArtMethod*> obsolete_map;
-  std::unordered_set<art::ArtMethod*> obsolete_methods;
-  bool success;
-  std::string* error_msg;
-
-  CallbackCtx(Redefiner* self, art::LinearAlloc* alloc, std::string* error)
-      : r(self), allocator(alloc), success(true), error_msg(error) {}
-};
-
-void DoRestoreObsoleteMethodsCallback(art::Thread* t, void* vdata) NO_THREAD_SAFETY_ANALYSIS {
-  CallbackCtx* data = reinterpret_cast<CallbackCtx*>(vdata);
-  ObsoleteMethodStackVisitor::RestoreFrames(t, data->obsolete_map, data->error_msg);
-}
-
-void DoAllocateObsoleteMethodsCallback(art::Thread* t, void* vdata) NO_THREAD_SAFETY_ANALYSIS {
-  CallbackCtx* data = reinterpret_cast<CallbackCtx*>(vdata);
-  if (data->success) {
-    // Don't do anything if we already failed once.
-    data->success = ObsoleteMethodStackVisitor::UpdateObsoleteFrames(t,
-                                                                     data->allocator,
-                                                                     data->obsolete_methods,
-                                                                     &data->obsolete_map,
-                                                                     data->error_msg);
-  }
-}
-
-// This creates any ArtMethod* structures needed for obsolete methods and ensures that the stack is
-// updated so they will be run.
-bool Redefiner::FindAndAllocateObsoleteMethods(art::mirror::Class* art_klass) {
-  art::ScopedAssertNoThreadSuspension ns("No thread suspension during thread stack walking");
-  art::mirror::ClassExt* ext = art_klass->GetExtData();
-  CHECK(ext->GetObsoleteMethods() != nullptr);
-  CallbackCtx ctx(this, art_klass->GetClassLoader()->GetAllocator(), error_msg_);
-  // Add all the declared methods to the map
-  for (auto& m : art_klass->GetDeclaredMethods(art::kRuntimePointerSize)) {
-    ctx.obsolete_methods.insert(&m);
-  }
-  for (art::ArtMethod* old_method : ctx.obsolete_methods) {
-    if (old_method->IsIntrinsic()) {
-      *error_msg_ = StringPrintf("Method '%s' is intrinsic and cannot be made obsolete!",
-                                 old_method->PrettyMethod().c_str());
-      return false;
-    }
-  }
-  {
-    art::MutexLock mu(self_, *art::Locks::thread_list_lock_);
-    art::ThreadList* list = art::Runtime::Current()->GetThreadList();
-    list->ForEach(DoAllocateObsoleteMethodsCallback, static_cast<void*>(&ctx));
-    if (!ctx.success) {
-      list->ForEach(DoRestoreObsoleteMethodsCallback, static_cast<void*>(&ctx));
-      return false;
-    }
-  }
-  FillObsoleteMethodMap(art_klass, ctx.obsolete_map);
-  return true;
-}
-
-// Fills the obsolete method map in the art_klass's extData. This is so obsolete methods are able to
-// figure out their DexCaches.
-void Redefiner::FillObsoleteMethodMap(
-    art::mirror::Class* art_klass,
-    const std::unordered_map<art::ArtMethod*, art::ArtMethod*>& obsoletes) {
-  int32_t index = 0;
-  art::mirror::ClassExt* ext_data = art_klass->GetExtData();
-  art::mirror::PointerArray* obsolete_methods = ext_data->GetObsoleteMethods();
-  art::mirror::ObjectArray<art::mirror::DexCache>* obsolete_dex_caches =
-      ext_data->GetObsoleteDexCaches();
-  int32_t num_method_slots = obsolete_methods->GetLength();
-  // Find the first empty index.
-  for (; index < num_method_slots; index++) {
-    if (obsolete_methods->GetElementPtrSize<art::ArtMethod*>(
-          index, art::kRuntimePointerSize) == nullptr) {
-      break;
-    }
-  }
-  // Make sure we have enough space.
-  CHECK_GT(num_method_slots, static_cast<int32_t>(obsoletes.size() + index));
-  CHECK(obsolete_dex_caches->Get(index) == nullptr);
-  // Fill in the map.
-  for (auto& obs : obsoletes) {
-    obsolete_methods->SetElementPtrSize(index, obs.second, art::kRuntimePointerSize);
-    obsolete_dex_caches->Set(index, art_klass->GetDexCache());
-    index++;
-  }
-}
-
-// TODO It should be possible to only deoptimize the specific obsolete methods.
-// TODO ReJitEverything can (sort of) fail. In certain cases it will skip deoptimizing some frames.
-// If one of these frames is an obsolete method we have a problem. b/33616143
-// TODO This shouldn't be necessary once we can ensure that the current method is not kept in
-// registers across suspend points.
-// TODO Pending b/33630159
-void Redefiner::EnsureObsoleteMethodsAreDeoptimized() {
-  art::ScopedAssertNoThreadSuspension nts("Deoptimizing everything!");
-  art::instrumentation::Instrumentation* i = runtime_->GetInstrumentation();
-  i->ReJitEverything("libOpenJkdJvmti - Class Redefinition");
-}
-
 jvmtiError Redefiner::Run() {
   art::StackHandleScope<5> hs(self_);
   // TODO We might want to have a global lock (or one based on the class being redefined at least)
@@ -583,11 +338,6 @@
   self_->TransitionFromRunnableToSuspended(art::ThreadState::kNative);
   runtime_->GetThreadList()->SuspendAll(
       "Final installation of redefined Class!", /*long_suspend*/true);
-  // TODO We need to invalidate all breakpoints in the redefined class with the debugger.
-  // TODO We need to deal with any instrumentation/debugger deoptimized_methods_.
-  // TODO We need to update all debugger MethodIDs so they note the method they point to is
-  // obsolete or implement some other well defined semantics.
-  // TODO We need to decide on & implement semantics for JNI jmethodids when we redefine methods.
   // TODO Might want to move this into a different type.
   // Now we reach the part where we must do active cleanup if something fails.
   // TODO We should really Retry if this fails instead of simply aborting.
@@ -595,8 +345,7 @@
   art::ObjPtr<art::mirror::LongArray> original_dex_file_cookie(nullptr);
   if (!UpdateJavaDexFile(java_dex_file.Get(),
                          new_dex_file_cookie.Get(),
-                         &original_dex_file_cookie) ||
-      !FindAndAllocateObsoleteMethods(art_class.Get())) {
+                         &original_dex_file_cookie)) {
     // Release suspendAll
     runtime_->GetThreadList()->ResumeAll();
     // Get back shared mutator lock as expected for return.
@@ -612,25 +361,23 @@
     self_->TransitionFromSuspendedToRunnable();
     return result_;
   }
-  // Ensure that obsolete methods are deoptimized. This is needed since optimized methods may have
-  // pointers to their ArtMethod's stashed in registers that they then use to attempt to hit the
-  // DexCache.
-  // TODO This can fail (leave some methods optimized) near runtime methods (including
-  // quick-to-interpreter transition function).
-  // TODO We probably don't need this at all once we have a way to ensure that the
-  // current_art_method is never stashed in a (physical) register by the JIT and lost to the
-  // stack-walker.
-  EnsureObsoleteMethodsAreDeoptimized();
-  // TODO Verify the new Class.
-  // TODO   Failure then undo updates to class
-  // TODO Shrink the obsolete method maps if possible?
-  // TODO find appropriate class loader.
+  // Update the ClassObjects. Keep the old DexCache (and other stuff) around so we can restore
+  // functions/fields.
+  // Verify the new Class.
+  //   On failure, undo the updates to the class.
+  // Do stack walks and allocate obsolete methods
+  // Shrink the obsolete method maps if possible?
+  // TODO find appropriate class loader. Allocate new dex files array. Pause all Java threads.
+  // Replace dex files array. Do stack scan + allocate obsoletes. Remove array if possible.
+  // TODO We might want to ensure that all threads are stopped for this!
+  // AddDexToClassPath();
+  // TODO
+  // Release suspendAll
   // TODO Put this into a scoped thing.
   runtime_->GetThreadList()->ResumeAll();
   // Get back shared mutator lock as expected for return.
   self_->TransitionFromSuspendedToRunnable();
-  // TODO Do the dex_file_ release at a more reasonable place. This works but it muddles who really
-  // owns the DexFile.
+  // TODO Do this at a more reasonable place.
   dex_file_.release();
   return OK;
 }
@@ -673,24 +420,19 @@
     }
     const art::DexFile::ProtoId* proto_id = dex_file_->FindProtoId(method_return_idx,
                                                                    new_type_list);
-    // TODO Return false, cleanup.
     CHECK(proto_id != nullptr || old_type_list == nullptr);
+    // TODO Return false, cleanup.
     const art::DexFile::MethodId* method_id = dex_file_->FindMethodId(declaring_class_id,
                                                                       *new_name_id,
                                                                       *proto_id);
-    // TODO Return false, cleanup.
     CHECK(method_id != nullptr);
+    // TODO Return false, cleanup.
     uint32_t dex_method_idx = dex_file_->GetIndexForMethodId(*method_id);
     method.SetDexMethodIndex(dex_method_idx);
     linker->SetEntryPointsToInterpreter(&method);
     method.SetCodeItemOffset(dex_file_->FindCodeItemOffset(class_def, dex_method_idx));
     method.SetDexCacheResolvedMethods(new_dex_cache->GetResolvedMethods(), image_pointer_size);
     method.SetDexCacheResolvedTypes(new_dex_cache->GetResolvedTypes(), image_pointer_size);
-    // Notify the jit that this method is redefined.
-    art::jit::Jit* jit = runtime_->GetJit();
-    if (jit != nullptr) {
-      jit->GetCodeCache()->NotifyMethodRedefined(&method);
-    }
   }
   return true;
 }
diff --git a/runtime/openjdkjvmti/ti_redefine.h b/runtime/openjdkjvmti/ti_redefine.h
index 9d23ce4..73cfc2b 100644
--- a/runtime/openjdkjvmti/ti_redefine.h
+++ b/runtime/openjdkjvmti/ti_redefine.h
@@ -64,8 +64,6 @@
 namespace openjdkjvmti {
 
 // Class that can redefine a single class's methods.
-// TODO We should really make this be driven by an outside class so we can do multiple classes at
-// the same time and have less required cleanup.
 class Redefiner {
  public:
   // Redefine the given class with the given dex data. Note this function does not take ownership of
@@ -126,14 +124,6 @@
   // in the future. For now we will just take the memory hit.
   bool EnsureClassAllocationsFinished() REQUIRES_SHARED(art::Locks::mutator_lock_);
 
-  // Ensure that obsolete methods are deoptimized. This is needed since optimized methods may have
-  // pointers to their ArtMethods stashed in registers that they then use to attempt to hit the
-  // DexCache.
-  void EnsureObsoleteMethodsAreDeoptimized()
-      REQUIRES(art::Locks::mutator_lock_)
-      REQUIRES(!art::Locks::thread_list_lock_,
-               !art::Locks::classlinker_classes_lock_);
-
   art::mirror::ClassLoader* GetClassLoader() REQUIRES_SHARED(art::Locks::mutator_lock_);
 
   // This finds the java.lang.DexFile we will add the native DexFile to as part of the classpath.
@@ -180,13 +170,6 @@
   bool UpdateClass(art::ObjPtr<art::mirror::Class> mclass,
                    art::ObjPtr<art::mirror::DexCache> new_dex_cache)
       REQUIRES(art::Locks::mutator_lock_);
-
-  bool FindAndAllocateObsoleteMethods(art::mirror::Class* art_klass)
-      REQUIRES(art::Locks::mutator_lock_);
-
-  void FillObsoleteMethodMap(art::mirror::Class* art_klass,
-                             const std::unordered_map<art::ArtMethod*, art::ArtMethod*>& obsoletes)
-      REQUIRES(art::Locks::mutator_lock_);
 };
 
 }  // namespace openjdkjvmti
diff --git a/runtime/stack.cc b/runtime/stack.cc
index f9efc0b..3fed7c9 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -618,17 +618,6 @@
   return result;
 }
 
-void StackVisitor::SetMethod(ArtMethod* method) {
-  DCHECK(GetMethod() != nullptr);
-  if (cur_shadow_frame_ != nullptr) {
-    cur_shadow_frame_->SetMethod(method);
-  } else {
-    DCHECK(cur_quick_frame_ != nullptr);
-    CHECK(!IsInInlinedFrame()) << "We do not support setting inlined method's ArtMethod!";
-      *cur_quick_frame_ = method;
-  }
-}
-
 static void AssertPcIsWithinQuickCode(ArtMethod* method, uintptr_t pc)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   if (method->IsNative() || method->IsRuntimeMethod() || method->IsProxyMethod()) {
diff --git a/runtime/stack.h b/runtime/stack.h
index 9dceb29..b1e99e5 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -327,12 +327,6 @@
     }
   }
 
-  void SetMethod(ArtMethod* method) REQUIRES(Locks::mutator_lock_) {
-    DCHECK(method != nullptr);
-    DCHECK(method_ != nullptr);
-    method_ = method;
-  }
-
   ArtMethod* GetMethod() const REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(method_ != nullptr);
     return method_;
@@ -616,10 +610,6 @@
 
   ArtMethod* GetMethod() const REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Sets this stack frame's method pointer. This requires a full lock of the MutatorLock. This
-  // doesn't work with inlined methods.
-  void SetMethod(ArtMethod* method) REQUIRES(Locks::mutator_lock_);
-
   ArtMethod* GetOuterMethod() const {
     return *GetCurrentQuickFrame();
   }
diff --git a/runtime/verifier/reg_type-inl.h b/runtime/verifier/reg_type-inl.h
index 10f1be5..aa4a259 100644
--- a/runtime/verifier/reg_type-inl.h
+++ b/runtime/verifier/reg_type-inl.h
@@ -112,7 +112,8 @@
       } else if (lhs.HasClass() && rhs.HasClass()) {
         // Test assignability from the Class point-of-view.
         bool result = lhs.GetClass()->IsAssignableFrom(rhs.GetClass());
-        // Record assignability dependency. The `verifier` is null during unit tests.
+        // Record assignability dependency. The `verifier` is null during unit tests and
+        // VerifiedMethod::GenerateSafeCastSet.
         if (verifier != nullptr) {
           VerifierDeps::MaybeRecordAssignability(
               verifier->GetDexFile(), lhs.GetClass(), rhs.GetClass(), strict, result);
diff --git a/runtime/verifier/verifier_deps.cc b/runtime/verifier/verifier_deps.cc
index 3af7c01..5f94a1b 100644
--- a/runtime/verifier/verifier_deps.cc
+++ b/runtime/verifier/verifier_deps.cc
@@ -249,11 +249,12 @@
   if (dex_cache == nullptr) {
     // This is a synthesized class, in this case always an array. They are not
     // defined in the compiled DEX files and therefore are part of the classpath.
-    // We could avoid recording dependencies on arrays with component types in
-    // the compiled DEX files but we choose to record them anyway so as to
-    // record the access flags VM sets for array classes.
+    // We do not record dependencies on arrays with component types in
+    // the compiled DEX files, as the only thing that might change is their
+    // access flags. If we were to change these flags in a breaking way, we would
+    // need to enforce full verification again anyway by updating the vdex version.
     DCHECK(klass->IsArrayClass()) << klass->PrettyDescriptor();
-    return true;
+    return false;
   }
 
   const DexFile* dex_file = dex_cache->GetDexFile();
@@ -355,6 +356,12 @@
     return;
   }
 
+  if (source->IsObjectClass() && !is_assignable) {
+    // j.l.Object is trivially non-assignable to other types; don't
+    // record it.
+    return;
+  }
+
   if (destination == source ||
       destination->IsObjectClass() ||
       (!is_strict && destination->IsInterface())) {
@@ -396,6 +403,21 @@
     return;
   }
 
+  if (!IsInClassPath(source) && !source->IsInterface() && !destination->IsInterface()) {
+    // Find the super class at the classpath boundary. Only that class
+    // can change the assignability.
+    // TODO: also chase the boundary for interfaces.
+    do {
+      source = source->GetSuperClass();
+    } while (!IsInClassPath(source));
+
+    // If that class is the actual destination, no need to record it.
+    if (source == destination) {
+      return;
+    }
+  }
+
+
   // Get string IDs for both descriptors and store in the appropriate set.
   dex::StringIndex destination_id = GetClassDescriptorStringId(dex_file, destination);
   dex::StringIndex source_id = GetClassDescriptorStringId(dex_file, source);
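
Note on the classpath-boundary walk added above (a hedged illustration; the class names are hypothetical, not taken from the change): if the compiled DEX defines class A extends B and B lives in the classpath, the loop walks A's superclass chain until it reaches B, the first classpath class. When the destination is B itself nothing is recorded, because the A -> B edge is fixed by the compiled DEX and cannot be invalidated by swapping the classpath; otherwise the dependency is recorded against B rather than A, since B is the only class whose place in the hierarchy the classpath can still change.
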
diff --git a/test/623-checker-loop-regressions/src/Main.java b/test/623-checker-loop-regressions/src/Main.java
index ce5bda1..7cc0b8b 100644
--- a/test/623-checker-loop-regressions/src/Main.java
+++ b/test/623-checker-loop-regressions/src/Main.java
@@ -82,6 +82,88 @@
     return 0;
   }
 
+  // Regression test for b/33774618: transfer operations involving
+  // narrowing linear induction should be done correctly.
+  //
+  /// CHECK-START: int Main.transferNarrowWrap() loop_optimization (before)
+  /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: Phi loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: Phi loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: Phi loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: int Main.transferNarrowWrap() loop_optimization (after)
+  /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: Phi loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: Phi loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: Phi loop:<<Loop>>      outer_loop:none
+  static int transferNarrowWrap() {
+    short x = 0;
+    int w = 10;
+    int v = 3;
+    for (int i = 0; i < 10; i++) {
+      v = w + 1;    // transfer on wrap-around
+      w = x;   // wrap-around
+      x += 2;  // narrowing linear
+    }
+    return v;
+  }
+
+  // Regression test for b/33774618: transfer operations involving
+  // narrowing linear induction should be done correctly
+  // (currently rejected, could be improved).
+  //
+  /// CHECK-START: int Main.polynomialShort() loop_optimization (before)
+  /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: Phi loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: int Main.polynomialShort() loop_optimization (after)
+  /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: Phi loop:<<Loop>>      outer_loop:none
+  static int polynomialShort() {
+    int x = 0;
+    for (short i = 0; i < 10; i++) {
+      x = x - i;  // polynomial on narrowing linear
+    }
+    return x;
+  }
+
+  // Regression test for b/33774618: transfer operations involving
+  // narrowing linear induction should be done correctly
+  // (currently rejected, could be improved).
+  //
+  /// CHECK-START: int Main.polynomialIntFromLong() loop_optimization (before)
+  /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: Phi loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: int Main.polynomialIntFromLong() loop_optimization (after)
+  /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: Phi loop:<<Loop>>      outer_loop:none
+  static int polynomialIntFromLong() {
+    int x = 0;
+    for (long i = 0; i < 10; i++) {
+      x = x - (int) i;  // polynomial on narrowing linear
+    }
+    return x;
+  }
+
+  /// CHECK-START: int Main.polynomialInt() loop_optimization (before)
+  /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: Phi loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: int Main.polynomialInt() loop_optimization (after)
+  /// CHECK-NOT: Phi
+  //
+  /// CHECK-START: int Main.polynomialInt() instruction_simplifier$after_bce (after)
+  /// CHECK-DAG: <<Int:i\d+>>  IntConstant -45 loop:none
+  /// CHECK-DAG:               Return [<<Int>>]  loop:none
+  static int polynomialInt() {
+    int x = 0;
+    for (int i = 0; i < 10; i++) {
+      x = x - i;
+    }
+    return x;
+  }
+
   public static void main(String[] args) {
     expectEquals(10, earlyExitFirst(-1));
     for (int i = 0; i <= 10; i++) {
@@ -98,6 +180,11 @@
 
     expectEquals(2, earlyExitNested());
 
+    expectEquals(17, transferNarrowWrap());
+    expectEquals(-45, polynomialShort());
+    expectEquals(-45, polynomialIntFromLong());
+    expectEquals(-45, polynomialInt());
+
     System.out.println("passed");
   }
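
For reference, the expected values asserted in main() can be checked by hand: polynomialInt, polynomialShort and polynomialIntFromLong all compute x = -(0 + 1 + ... + 9) = -45, which is also why the checker expects the plain-int version to fold to IntConstant -45 after simplification. In transferNarrowWrap the final iteration assigns v = w + 1, where w still holds x from the previous iteration (eight increments of 2, i.e. 16), giving 17.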
 
diff --git a/test/626-checker-arm64-scratch-register/expected.txt b/test/626-checker-arm64-scratch-register/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/626-checker-arm64-scratch-register/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/626-checker-arm64-scratch-register/info.txt b/test/626-checker-arm64-scratch-register/info.txt
new file mode 100644
index 0000000..8472131
--- /dev/null
+++ b/test/626-checker-arm64-scratch-register/info.txt
@@ -0,0 +1,2 @@
+Regression test checking that the ARM64 scratch register pool is not
+exhausted during moves between stack slots (b/32545705).
diff --git a/test/626-checker-arm64-scratch-register/src/Main.java b/test/626-checker-arm64-scratch-register/src/Main.java
new file mode 100644
index 0000000..aa211be
--- /dev/null
+++ b/test/626-checker-arm64-scratch-register/src/Main.java
@@ -0,0 +1,298 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+  boolean b00;
+  boolean b01;
+  boolean b02;
+  boolean b03;
+  boolean b04;
+  boolean b05;
+  boolean b06;
+  boolean b07;
+  boolean b08;
+  boolean b09;
+  boolean b10;
+  boolean b11;
+  boolean b12;
+  boolean b13;
+  boolean b14;
+  boolean b15;
+  boolean b16;
+  boolean b17;
+  boolean b18;
+  boolean b19;
+  boolean b20;
+  boolean b21;
+  boolean b22;
+  boolean b23;
+  boolean b24;
+  boolean b25;
+  boolean b26;
+  boolean b27;
+  boolean b28;
+  boolean b29;
+  boolean b30;
+  boolean b31;
+  boolean b32;
+  boolean b33;
+  boolean b34;
+  boolean b35;
+  boolean b36;
+
+  boolean conditionA;
+  boolean conditionB;
+  boolean conditionC;
+
+  /// CHECK-START-ARM64: void Main.test() register (after)
+  /// CHECK: begin_block
+  /// CHECK:   name "B0"
+  /// CHECK:       <<This:l\d+>>  ParameterValue
+  /// CHECK: end_block
+  /// CHECK: begin_block
+  /// CHECK:   successors "<<ThenBlock:B\d+>>" "<<ElseBlock:B\d+>>"
+  /// CHECK:       <<CondB:z\d+>>  InstanceFieldGet [<<This>>] field_name:Main.conditionB
+  /// CHECK:                       If [<<CondB>>]
+  /// CHECK:  end_block
+  /// CHECK: begin_block
+  /// CHECK:   name "<<ElseBlock>>"
+  /// CHECK:                      ParallelMove moves:[#100->d17,32(sp)->d1,36(sp)->d2,d17->d3,d3->d4,d4->d5,d5->d6,d6->d7,d7->d18,d18->d19,d19->d20,d20->d21,d21->d22,d22->d23,d23->d10,d10->d11,d11->d12,24(sp)->d13,28(sp)->d14,d14->16(sp),d12->20(sp),d13->24(sp),d1->28(sp),d2->32(sp),16(sp)->36(sp),20(sp)->40(sp)]
+  /// CHECK: end_block
+
+  /// CHECK-START-ARM64: void Main.test() disassembly (after)
+  /// CHECK: begin_block
+  /// CHECK:   name "B0"
+  /// CHECK:       <<This:l\d+>>  ParameterValue
+  /// CHECK: end_block
+  /// CHECK: begin_block
+  /// CHECK:   successors "<<ThenBlock:B\d+>>" "<<ElseBlock:B\d+>>"
+  /// CHECK:       <<CondB:z\d+>>  InstanceFieldGet [<<This>>] field_name:Main.conditionB
+  /// CHECK:                       If [<<CondB>>]
+  /// CHECK:  end_block
+  /// CHECK: begin_block
+  /// CHECK:   name "<<ElseBlock>>"
+  /// CHECK:                      ParallelMove moves:[invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid]
+  /// CHECK:                        fmov d31, d2
+  /// CHECK:                        ldr s2, [sp, #36]
+  /// CHECK:                        ldr w16, [sp, #16]
+  /// CHECK:                        str w16, [sp, #36]
+  /// CHECK:                        str s14, [sp, #16]
+  /// CHECK:                        ldr s14, [sp, #28]
+  /// CHECK:                        str s1, [sp, #28]
+  /// CHECK:                        ldr s1, [sp, #32]
+  /// CHECK:                        str s31, [sp, #32]
+  /// CHECK:                        ldr w16, [sp, #20]
+  /// CHECK:                        str w16, [sp, #40]
+  /// CHECK:                        str s12, [sp, #20]
+  /// CHECK:                        fmov d12, d11
+  /// CHECK:                        fmov d11, d10
+  /// CHECK:                        fmov d10, d23
+  /// CHECK:                        fmov d23, d22
+  /// CHECK:                        fmov d22, d21
+  /// CHECK:                        fmov d21, d20
+  /// CHECK:                        fmov d20, d19
+  /// CHECK:                        fmov d19, d18
+  /// CHECK:                        fmov d18, d7
+  /// CHECK:                        fmov d7, d6
+  /// CHECK:                        fmov d6, d5
+  /// CHECK:                        fmov d5, d4
+  /// CHECK:                        fmov d4, d3
+  /// CHECK:                        fmov d3, d17
+  /// CHECK:                        fmov d17, d13
+  /// CHECK:                        ldr s13, [sp, #24]
+  /// CHECK:                        str s17, [sp, #24]
+  /// CHECK:                        ldr s17, pc+{{\d+}} (addr {{0x[0-9a-f]+}}) (100)
+  /// CHECK: end_block
+
+  public void test() {
+    String r = "";
+
+    // For the purpose of this regression test, the order of
+    // definition of these float variables matters. Likewise with the
+    // order of the instructions where these variables are used below.
+    // Reordering these lines can make the original (b/32545705)
+    // issue vanish.
+    float f17 = b17 ? 0.0f : 1.0f;
+    float f16 = b16 ? 0.0f : 1.0f;
+    float f18 = b18 ? 0.0f : 1.0f;
+    float f19 = b19 ? 0.0f : 1.0f;
+    float f20 = b20 ? 0.0f : 1.0f;
+    float f21 = b21 ? 0.0f : 1.0f;
+    float f15 = b15 ? 0.0f : 1.0f;
+    float f00 = b00 ? 0.0f : 1.0f;
+    float f22 = b22 ? 0.0f : 1.0f;
+    float f23 = b23 ? 0.0f : 1.0f;
+    float f24 = b24 ? 0.0f : 1.0f;
+    float f25 = b25 ? 0.0f : 1.0f;
+    float f26 = b26 ? 0.0f : 1.0f;
+    float f27 = b27 ? 0.0f : 1.0f;
+    float f29 = b29 ? 0.0f : 1.0f;
+    float f28 = b28 ? 0.0f : 1.0f;
+    float f01 = b01 ? 0.0f : 1.0f;
+    float f02 = b02 ? 0.0f : 1.0f;
+    float f03 = b03 ? 0.0f : 1.0f;
+    float f04 = b04 ? 0.0f : 1.0f;
+    float f05 = b05 ? 0.0f : 1.0f;
+    float f07 = b07 ? 0.0f : 1.0f;
+    float f06 = b06 ? 0.0f : 1.0f;
+    float f30 = b30 ? 0.0f : 1.0f;
+    float f31 = b31 ? 0.0f : 1.0f;
+    float f32 = b32 ? 0.0f : 1.0f;
+    float f33 = b33 ? 0.0f : 1.0f;
+    float f34 = b34 ? 0.0f : 1.0f;
+    float f36 = b36 ? 0.0f : 1.0f;
+    float f35 = b35 ? 0.0f : 1.0f;
+    float f08 = b08 ? 0.0f : 1.0f;
+    float f09 = b09 ? 0.0f : 1.0f;
+    float f10 = b10 ? 0.0f : 1.0f;
+    float f11 = b11 ? 0.0f : 1.0f;
+    float f12 = b12 ? 0.0f : 1.0f;
+    float f14 = b14 ? 0.0f : 1.0f;
+    float f13 = b13 ? 0.0f : 1.0f;
+
+    if (conditionA) {
+      f16 /= 1000.0f;
+      f17 /= 1000.0f;
+      f18 /= 1000.0f;
+      f19 /= 1000.0f;
+      f20 /= 1000.0f;
+      f21 /= 1000.0f;
+      f15 /= 1000.0f;
+      f08 /= 1000.0f;
+      f09 /= 1000.0f;
+      f10 /= 1000.0f;
+      f11 /= 1000.0f;
+      f12 /= 1000.0f;
+      f30 /= 1000.0f;
+      f31 /= 1000.0f;
+      f32 /= 1000.0f;
+      f33 /= 1000.0f;
+      f34 /= 1000.0f;
+      f01 /= 1000.0f;
+      f02 /= 1000.0f;
+      f03 /= 1000.0f;
+      f04 /= 1000.0f;
+      f05 /= 1000.0f;
+      f23 /= 1000.0f;
+      f24 /= 1000.0f;
+      f25 /= 1000.0f;
+      f26 /= 1000.0f;
+      f27 /= 1000.0f;
+      f22 /= 1000.0f;
+      f00 /= 1000.0f;
+      f14 /= 1000.0f;
+      f13 /= 1000.0f;
+      f36 /= 1000.0f;
+      f35 /= 1000.0f;
+      f07 /= 1000.0f;
+      f06 /= 1000.0f;
+      f29 /= 1000.0f;
+      f28 /= 1000.0f;
+    }
+    // The parallel move that used to exhaust the ARM64 parallel move
+    // resolver's scratch register pool (provided by VIXL) was in the
+    // "else" branch of the following condition generated by ART's
+    // compiler.
+    if (conditionB) {
+      f16 /= 100.0f;
+      f17 /= 100.0f;
+      f18 /= 100.0f;
+      f19 /= 100.0f;
+      f20 /= 100.0f;
+      f21 /= 100.0f;
+      f15 /= 100.0f;
+      f08 /= 100.0f;
+      f09 /= 100.0f;
+      f10 /= 100.0f;
+      f11 /= 100.0f;
+      f12 /= 100.0f;
+      f30 /= 100.0f;
+      f31 /= 100.0f;
+      f32 /= 100.0f;
+      f33 /= 100.0f;
+      f34 /= 100.0f;
+      f01 /= 100.0f;
+      f02 /= 100.0f;
+      f03 /= 100.0f;
+      f04 /= 100.0f;
+      f05 /= 100.0f;
+      f23 /= 100.0f;
+      f24 /= 100.0f;
+      f25 /= 100.0f;
+      f26 /= 100.0f;
+      f27 /= 100.0f;
+      f22 /= 100.0f;
+      f00 /= 100.0f;
+      f14 /= 100.0f;
+      f13 /= 100.0f;
+      f36 /= 100.0f;
+      f35 /= 100.0f;
+      f07 /= 100.0f;
+      f06 /= 100.0f;
+      f29 /= 100.0f;
+      f28 /= 100.0f;
+    }
+    if (conditionC) {
+      f16 /= 12.0f;
+      f17 /= 12.0f;
+      f18 /= 12.0f;
+      f19 /= 12.0f;
+      f20 /= 12.0f;
+      f21 /= 12.0f;
+      f15 /= 12.0f;
+      f08 /= 12.0f;
+      f09 /= 12.0f;
+      f10 /= 12.0f;
+      f11 /= 12.0f;
+      f12 /= 12.0f;
+      f30 /= 12.0f;
+      f31 /= 12.0f;
+      f32 /= 12.0f;
+      f33 /= 12.0f;
+      f34 /= 12.0f;
+      f01 /= 12.0f;
+      f02 /= 12.0f;
+      f03 /= 12.0f;
+      f04 /= 12.0f;
+      f05 /= 12.0f;
+      f23 /= 12.0f;
+      f24 /= 12.0f;
+      f25 /= 12.0f;
+      f26 /= 12.0f;
+      f27 /= 12.0f;
+      f22 /= 12.0f;
+      f00 /= 12.0f;
+      f14 /= 12.0f;
+      f13 /= 12.0f;
+      f36 /= 12.0f;
+      f35 /= 12.0f;
+      f07 /= 12.0f;
+      f06 /= 12.0f;
+      f29 /= 12.0f;
+      f28 /= 12.0f;
+    }
+    float s = 0.0f;
+    s = ((float) Math.round(100.0f * s)) / 100.0f;
+    String res = s + r;
+  }
+
+  public static void main(String[] args) {
+    Main main = new Main();
+    main.test();
+    System.out.println("passed");
+  }
+}
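
A rough sketch of why this code shape exercised b/32545705, inferred from the comments and checker expectations above rather than stated in the change: with close to forty float values live across the three conditional blocks, not all of them fit in FP registers, so several live in stack slots. Resolving the large ParallelMove emitted on the else path of the conditionB branch then needs temporaries for the stack-to-stack and constant-load moves, and before the fix the ARM64 parallel move resolver could request more scratch registers from VIXL than its pool provides.
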
diff --git a/test/628-vdex/run b/test/628-vdex/run
index f1b0a95..4cbcea3 100644
--- a/test/628-vdex/run
+++ b/test/628-vdex/run
@@ -14,4 +14,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-exec ${RUN} --vdex "${@}"
+exec ${RUN} -Xcompiler-option --compiler-filter=verify-profile --vdex "${@}"
diff --git a/test/901-hello-ti-agent/run b/test/901-hello-ti-agent/run
index 4379349..8079a8c 100755
--- a/test/901-hello-ti-agent/run
+++ b/test/901-hello-ti-agent/run
@@ -14,6 +14,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+plugin=libopenjdkjvmtid.so
+agent=libtiagentd.so
+if  [[ "$@" == *"-O"* ]]; then
+  agent=libtiagent.so
+  plugin=libopenjdkjvmti.so
+fi
+
 ./default-run "$@" --experimental agents \
                    --experimental runtime-plugins \
-                   --jvmti
+                   --runtime-option -agentpath:${agent}=901-hello-ti-agent \
+                   --android-runtime-option -Xplugin:${plugin}
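
The same debug/release selection recurs in each of the JVMTI run scripts updated below, with only the test name, the optional jvm/art argument, and the --debuggable handling varying. Condensed, the options the 901 script above hands to default-run are (a summary of the script, not an exact command line):

  # default (debug) build:
  #   --runtime-option -agentpath:libtiagentd.so=901-hello-ti-agent
  #   --android-runtime-option -Xplugin:libopenjdkjvmtid.so
  # with -O (optimized) the release variants are used instead:
  #   --runtime-option -agentpath:libtiagent.so=901-hello-ti-agent
  #   --android-runtime-option -Xplugin:libopenjdkjvmti.so
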
diff --git a/test/902-hello-transformation/run b/test/902-hello-transformation/run
index 4379349..94a8b2d 100755
--- a/test/902-hello-transformation/run
+++ b/test/902-hello-transformation/run
@@ -14,6 +14,30 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+plugin=libopenjdkjvmtid.so
+agent=libtiagentd.so
+lib=tiagentd
+if  [[ "$@" == *"-O"* ]]; then
+  agent=libtiagent.so
+  plugin=libopenjdkjvmti.so
+  lib=tiagent
+fi
+
+if [[ "$@" == *"--jvm"* ]]; then
+  arg="jvm"
+else
+  arg="art"
+  if [[ "$@" != *"--debuggable"* ]]; then
+    other_args=" -Xcompiler-option --debuggable "
+  else
+    other_args=""
+  fi
+fi
+
 ./default-run "$@" --experimental agents \
                    --experimental runtime-plugins \
-                   --jvmti
+                   --runtime-option -agentpath:${agent}=902-hello-transformation,${arg} \
+                   --android-runtime-option -Xplugin:${plugin} \
+                   --android-runtime-option -Xfully-deoptable \
+                   ${other_args} \
+                   --args ${lib}
diff --git a/test/903-hello-tagging/run b/test/903-hello-tagging/run
index 4379349..5e3c0bd 100755
--- a/test/903-hello-tagging/run
+++ b/test/903-hello-tagging/run
@@ -14,6 +14,30 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+plugin=libopenjdkjvmtid.so
+agent=libtiagentd.so
+lib=tiagentd
+if  [[ "$@" == *"-O"* ]]; then
+  agent=libtiagent.so
+  plugin=libopenjdkjvmti.so
+  lib=tiagent
+fi
+
+if [[ "$@" == *"--jvm"* ]]; then
+  arg="jvm"
+else
+  arg="art"
+fi
+
+if [[ "$@" != *"--debuggable"* ]]; then
+  other_args=" -Xcompiler-option --debuggable "
+else
+  other_args=""
+fi
+
 ./default-run "$@" --experimental agents \
                    --experimental runtime-plugins \
-                   --jvmti
+                   --runtime-option -agentpath:${agent}=903-hello-tagging,${arg} \
+                   --android-runtime-option -Xplugin:${plugin} \
+                   ${other_args} \
+                   --args ${lib}
diff --git a/test/904-object-allocation/run b/test/904-object-allocation/run
index 4379349..2f7ad21 100755
--- a/test/904-object-allocation/run
+++ b/test/904-object-allocation/run
@@ -14,6 +14,30 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+plugin=libopenjdkjvmtid.so
+agent=libtiagentd.so
+lib=tiagentd
+if  [[ "$@" == *"-O"* ]]; then
+  agent=libtiagent.so
+  plugin=libopenjdkjvmti.so
+  lib=tiagent
+fi
+
+if [[ "$@" == *"--jvm"* ]]; then
+  arg="jvm"
+else
+  arg="art"
+fi
+
+if [[ "$@" != *"--debuggable"* ]]; then
+  other_args=" -Xcompiler-option --debuggable "
+else
+  other_args=""
+fi
+
 ./default-run "$@" --experimental agents \
                    --experimental runtime-plugins \
-                   --jvmti
+                   --runtime-option -agentpath:${agent}=904-object-allocation,${arg} \
+                   --android-runtime-option -Xplugin:${plugin} \
+                   ${other_args} \
+                   --args ${lib}
diff --git a/test/905-object-free/run b/test/905-object-free/run
index 4379349..753b742 100755
--- a/test/905-object-free/run
+++ b/test/905-object-free/run
@@ -14,6 +14,30 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+plugin=libopenjdkjvmtid.so
+agent=libtiagentd.so
+lib=tiagentd
+if  [[ "$@" == *"-O"* ]]; then
+  agent=libtiagent.so
+  plugin=libopenjdkjvmti.so
+  lib=tiagent
+fi
+
+if [[ "$@" == *"--jvm"* ]]; then
+  arg="jvm"
+else
+  arg="art"
+fi
+
+if [[ "$@" != *"--debuggable"* ]]; then
+  other_args=" -Xcompiler-option --debuggable "
+else
+  other_args=""
+fi
+
 ./default-run "$@" --experimental agents \
                    --experimental runtime-plugins \
-                   --jvmti
+                   --runtime-option -agentpath:${agent}=905-object-free,${arg} \
+                   --android-runtime-option -Xplugin:${plugin} \
+                   ${other_args} \
+                   --args ${lib}
diff --git a/test/906-iterate-heap/run b/test/906-iterate-heap/run
index 4379349..3e135a3 100755
--- a/test/906-iterate-heap/run
+++ b/test/906-iterate-heap/run
@@ -14,6 +14,30 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+plugin=libopenjdkjvmtid.so
+agent=libtiagentd.so
+lib=tiagentd
+if  [[ "$@" == *"-O"* ]]; then
+  agent=libtiagent.so
+  plugin=libopenjdkjvmti.so
+  lib=tiagent
+fi
+
+if [[ "$@" == *"--jvm"* ]]; then
+  arg="jvm"
+else
+  arg="art"
+fi
+
+if [[ "$@" != *"--debuggable"* ]]; then
+  other_args=" -Xcompiler-option --debuggable "
+else
+  other_args=""
+fi
+
 ./default-run "$@" --experimental agents \
                    --experimental runtime-plugins \
-                   --jvmti
+                   --runtime-option -agentpath:${agent}=906-iterate-heap,${arg} \
+                   --android-runtime-option -Xplugin:${plugin} \
+                   ${other_args} \
+                   --args ${lib}
diff --git a/test/907-get-loaded-classes/run b/test/907-get-loaded-classes/run
index 4379349..3f5a059 100755
--- a/test/907-get-loaded-classes/run
+++ b/test/907-get-loaded-classes/run
@@ -14,6 +14,30 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+plugin=libopenjdkjvmtid.so
+agent=libtiagentd.so
+lib=tiagentd
+if  [[ "$@" == *"-O"* ]]; then
+  agent=libtiagent.so
+  plugin=libopenjdkjvmti.so
+  lib=tiagent
+fi
+
+if [[ "$@" == *"--jvm"* ]]; then
+  arg="jvm"
+else
+  arg="art"
+fi
+
+if [[ "$@" != *"--debuggable"* ]]; then
+  other_args=" -Xcompiler-option --debuggable "
+else
+  other_args=""
+fi
+
 ./default-run "$@" --experimental agents \
                    --experimental runtime-plugins \
-                   --jvmti
+                   --runtime-option -agentpath:${agent}=907-get-loaded-classes,${arg} \
+                   --android-runtime-option -Xplugin:${plugin} \
+                   ${other_args} \
+                   --args ${lib}
diff --git a/test/908-gc-start-finish/run b/test/908-gc-start-finish/run
index 4379349..2fc35f0 100755
--- a/test/908-gc-start-finish/run
+++ b/test/908-gc-start-finish/run
@@ -14,6 +14,30 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+plugin=libopenjdkjvmtid.so
+agent=libtiagentd.so
+lib=tiagentd
+if  [[ "$@" == *"-O"* ]]; then
+  agent=libtiagent.so
+  plugin=libopenjdkjvmti.so
+  lib=tiagent
+fi
+
+if [[ "$@" == *"--jvm"* ]]; then
+  arg="jvm"
+else
+  arg="art"
+fi
+
+if [[ "$@" != *"--debuggable"* ]]; then
+  other_args=" -Xcompiler-option --debuggable "
+else
+  other_args=""
+fi
+
 ./default-run "$@" --experimental agents \
                    --experimental runtime-plugins \
-                   --jvmti
+                   --runtime-option -agentpath:${agent}=908-gc-start-finish,${arg} \
+                   --android-runtime-option -Xplugin:${plugin} \
+                   ${other_args} \
+                   --args ${lib}
diff --git a/test/910-methods/run b/test/910-methods/run
index 4379349..4dd2555 100755
--- a/test/910-methods/run
+++ b/test/910-methods/run
@@ -14,6 +14,30 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+plugin=libopenjdkjvmtid.so
+agent=libtiagentd.so
+lib=tiagentd
+if  [[ "$@" == *"-O"* ]]; then
+  agent=libtiagent.so
+  plugin=libopenjdkjvmti.so
+  lib=tiagent
+fi
+
+if [[ "$@" == *"--jvm"* ]]; then
+  arg="jvm"
+else
+  arg="art"
+fi
+
+if [[ "$@" != *"--debuggable"* ]]; then
+  other_args=" -Xcompiler-option --debuggable "
+else
+  other_args=""
+fi
+
 ./default-run "$@" --experimental agents \
                    --experimental runtime-plugins \
-                   --jvmti
+                   --runtime-option -agentpath:${agent}=910-methods,${arg} \
+                   --android-runtime-option -Xplugin:${plugin} \
+                   ${other_args} \
+                   --args ${lib}
diff --git a/test/911-get-stack-trace/run b/test/911-get-stack-trace/run
index 4379349..43fc325 100755
--- a/test/911-get-stack-trace/run
+++ b/test/911-get-stack-trace/run
@@ -14,6 +14,30 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+plugin=libopenjdkjvmtid.so
+agent=libtiagentd.so
+lib=tiagentd
+if  [[ "$@" == *"-O"* ]]; then
+  agent=libtiagent.so
+  plugin=libopenjdkjvmti.so
+  lib=tiagent
+fi
+
+if [[ "$@" == *"--jvm"* ]]; then
+  arg="jvm"
+else
+  arg="art"
+fi
+
+if [[ "$@" != *"--debuggable"* ]]; then
+  other_args=" -Xcompiler-option --debuggable "
+else
+  other_args=""
+fi
+
 ./default-run "$@" --experimental agents \
                    --experimental runtime-plugins \
-                   --jvmti
+                   --runtime-option -agentpath:${agent}=911-get-stack-trace,${arg} \
+                   --android-runtime-option -Xplugin:${plugin} \
+                   ${other_args} \
+                   --args ${lib}
diff --git a/test/912-classes/run b/test/912-classes/run
index 4379349..64bbb98 100755
--- a/test/912-classes/run
+++ b/test/912-classes/run
@@ -14,6 +14,30 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+plugin=libopenjdkjvmtid.so
+agent=libtiagentd.so
+lib=tiagentd
+if  [[ "$@" == *"-O"* ]]; then
+  agent=libtiagent.so
+  plugin=libopenjdkjvmti.so
+  lib=tiagent
+fi
+
+if [[ "$@" == *"--jvm"* ]]; then
+  arg="jvm"
+else
+  arg="art"
+fi
+
+if [[ "$@" != *"--debuggable"* ]]; then
+  other_args=" -Xcompiler-option --debuggable "
+else
+  other_args=""
+fi
+
 ./default-run "$@" --experimental agents \
                    --experimental runtime-plugins \
-                   --jvmti
+                   --runtime-option -agentpath:${agent}=912-classes,${arg} \
+                   --android-runtime-option -Xplugin:${plugin} \
+                   ${other_args} \
+                   --args ${lib}
diff --git a/test/913-heaps/run b/test/913-heaps/run
index 4379349..7bd8cbd 100755
--- a/test/913-heaps/run
+++ b/test/913-heaps/run
@@ -14,6 +14,30 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+plugin=libopenjdkjvmtid.so
+agent=libtiagentd.so
+lib=tiagentd
+if  [[ "$@" == *"-O"* ]]; then
+  agent=libtiagent.so
+  plugin=libopenjdkjvmti.so
+  lib=tiagent
+fi
+
+if [[ "$@" == *"--jvm"* ]]; then
+  arg="jvm"
+else
+  arg="art"
+fi
+
+if [[ "$@" != *"--debuggable"* ]]; then
+  other_args=" -Xcompiler-option --debuggable "
+else
+  other_args=""
+fi
+
 ./default-run "$@" --experimental agents \
                    --experimental runtime-plugins \
-                   --jvmti
+                   --runtime-option -agentpath:${agent}=913-heaps,${arg} \
+                   --android-runtime-option -Xplugin:${plugin} \
+                   ${other_args} \
+                   --args ${lib}
diff --git a/test/914-hello-obsolescence/build b/test/914-hello-obsolescence/build
deleted file mode 100755
index 898e2e5..0000000
--- a/test/914-hello-obsolescence/build
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-./default-build "$@" --experimental agents
diff --git a/test/914-hello-obsolescence/expected.txt b/test/914-hello-obsolescence/expected.txt
deleted file mode 100644
index 83efda1..0000000
--- a/test/914-hello-obsolescence/expected.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-hello
-Not doing anything here
-goodbye
-hello
-transforming calling function
-goodbye
-Hello - Transformed
-Not doing anything here
-Goodbye - Transformed
diff --git a/test/914-hello-obsolescence/info.txt b/test/914-hello-obsolescence/info.txt
deleted file mode 100644
index c8b892c..0000000
--- a/test/914-hello-obsolescence/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Tests basic obsolete method support
diff --git a/test/914-hello-obsolescence/run b/test/914-hello-obsolescence/run
deleted file mode 100755
index 4379349..0000000
--- a/test/914-hello-obsolescence/run
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-./default-run "$@" --experimental agents \
-                   --experimental runtime-plugins \
-                   --jvmti
diff --git a/test/914-hello-obsolescence/src/Main.java b/test/914-hello-obsolescence/src/Main.java
deleted file mode 100644
index 46266ef..0000000
--- a/test/914-hello-obsolescence/src/Main.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.util.Base64;
-
-public class Main {
-  // class Transform {
-  //   public void sayHi(Runnable r) {
-  //     System.out.println("Hello - Transformed");
-  //     r.run();
-  //     System.out.println("Goodbye - Transformed");
-  //   }
-  // }
-  private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
-    "yv66vgAAADQAJAoACAARCQASABMIABQKABUAFgsAFwAYCAAZBwAaBwAbAQAGPGluaXQ+AQADKClW" +
-    "AQAEQ29kZQEAD0xpbmVOdW1iZXJUYWJsZQEABXNheUhpAQAXKExqYXZhL2xhbmcvUnVubmFibGU7" +
-    "KVYBAApTb3VyY2VGaWxlAQAOVHJhbnNmb3JtLmphdmEMAAkACgcAHAwAHQAeAQATSGVsbG8gLSBU" +
-    "cmFuc2Zvcm1lZAcAHwwAIAAhBwAiDAAjAAoBABVHb29kYnllIC0gVHJhbnNmb3JtZWQBAAlUcmFu" +
-    "c2Zvcm0BABBqYXZhL2xhbmcvT2JqZWN0AQAQamF2YS9sYW5nL1N5c3RlbQEAA291dAEAFUxqYXZh" +
-    "L2lvL1ByaW50U3RyZWFtOwEAE2phdmEvaW8vUHJpbnRTdHJlYW0BAAdwcmludGxuAQAVKExqYXZh" +
-    "L2xhbmcvU3RyaW5nOylWAQASamF2YS9sYW5nL1J1bm5hYmxlAQADcnVuACAABwAIAAAAAAACAAAA" +
-    "CQAKAAEACwAAAB0AAQABAAAABSq3AAGxAAAAAQAMAAAABgABAAAAAQABAA0ADgABAAsAAAA7AAIA" +
-    "AgAAABeyAAISA7YABCu5AAUBALIAAhIGtgAEsQAAAAEADAAAABIABAAAAAMACAAEAA4ABQAWAAYA" +
-    "AQAPAAAAAgAQ");
-  private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
-    "ZGV4CjAzNQAYeAMMXgYWxoeSHAS9EWKCCtVRSAGpqZVQAwAAcAAAAHhWNBIAAAAAAAAAALACAAAR" +
-    "AAAAcAAAAAcAAAC0AAAAAwAAANAAAAABAAAA9AAAAAUAAAD8AAAAAQAAACQBAAAMAgAARAEAAKIB" +
-    "AACqAQAAwQEAANYBAADjAQAA+gEAAA4CAAAkAgAAOAIAAEwCAABcAgAAXwIAAGMCAAB3AgAAfAIA" +
-    "AIUCAACKAgAAAwAAAAQAAAAFAAAABgAAAAcAAAAIAAAACgAAAAoAAAAGAAAAAAAAAAsAAAAGAAAA" +
-    "lAEAAAsAAAAGAAAAnAEAAAUAAQANAAAAAAAAAAAAAAAAAAEAEAAAAAEAAgAOAAAAAgAAAAAAAAAD" +
-    "AAAADwAAAAAAAAAAAAAAAgAAAAAAAAAJAAAAAAAAAJ8CAAAAAAAAAQABAAEAAACRAgAABAAAAHAQ" +
-    "AwAAAA4ABAACAAIAAACWAgAAFAAAAGIAAAAbAQIAAABuIAIAEAByEAQAAwBiAAAAGwEBAAAAbiAC" +
-    "ABAADgABAAAAAwAAAAEAAAAEAAY8aW5pdD4AFUdvb2RieWUgLSBUcmFuc2Zvcm1lZAATSGVsbG8g" +
-    "LSBUcmFuc2Zvcm1lZAALTFRyYW5zZm9ybTsAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwASTGphdmEv" +
-    "bGFuZy9PYmplY3Q7ABRMamF2YS9sYW5nL1J1bm5hYmxlOwASTGphdmEvbGFuZy9TdHJpbmc7ABJM" +
-    "amF2YS9sYW5nL1N5c3RlbTsADlRyYW5zZm9ybS5qYXZhAAFWAAJWTAASZW1pdHRlcjogamFjay00" +
-    "LjEzAANvdXQAB3ByaW50bG4AA3J1bgAFc2F5SGkAAQAHDgADAQAHDoc8hwAAAAEBAICABMQCAQHc" +
-    "AgAAAA0AAAAAAAAAAQAAAAAAAAABAAAAEQAAAHAAAAACAAAABwAAALQAAAADAAAAAwAAANAAAAAE" +
-    "AAAAAQAAAPQAAAAFAAAABQAAAPwAAAAGAAAAAQAAACQBAAABIAAAAgAAAEQBAAABEAAAAgAAAJQB" +
-    "AAACIAAAEQAAAKIBAAADIAAAAgAAAJECAAAAIAAAAQAAAJ8CAAAAEAAAAQAAALACAAA=");
-
-  public static void main(String[] args) {
-    System.loadLibrary(args[1]);
-    doTest(new Transform());
-  }
-
-  public static void doTest(Transform t) {
-    t.sayHi(() -> { System.out.println("Not doing anything here"); });
-    t.sayHi(() -> {
-      System.out.println("transforming calling function");
-      doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
-    });
-    t.sayHi(() -> { System.out.println("Not doing anything here"); });
-  }
-
-  // Transforms the class
-  private static native void doCommonClassRedefinition(Class<?> target,
-                                                       byte[] classfile,
-                                                       byte[] dexfile);
-}
diff --git a/test/914-hello-obsolescence/src/Transform.java b/test/914-hello-obsolescence/src/Transform.java
deleted file mode 100644
index 8cda6cd..0000000
--- a/test/914-hello-obsolescence/src/Transform.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-class Transform {
-  public void sayHi(Runnable r) {
-    // Use lower 'h' to make sure the string will have a different string id
-    // than the transformation (the transformation code is the same except
-    // the actual printed String, which was making the test inacurately passing
-    // in JIT mode when loading the string from the dex cache, as the string ids
-    // of the two different strings were the same).
-    // We know the string ids will be different because lexicographically:
-    // "Hello" < "LTransform;" < "hello".
-    System.out.println("hello");
-    r.run();
-    System.out.println("goodbye");
-  }
-}
diff --git a/test/915-obsolete-2/build b/test/915-obsolete-2/build
deleted file mode 100755
index 898e2e5..0000000
--- a/test/915-obsolete-2/build
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-./default-build "$@" --experimental agents
diff --git a/test/915-obsolete-2/expected.txt b/test/915-obsolete-2/expected.txt
deleted file mode 100644
index 04aff3a..0000000
--- a/test/915-obsolete-2/expected.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-Pre Start private method call
-hello - private
-Post Start private method call
-Not doing anything here
-Pre Finish private method call
-goodbye - private
-Post Finish private method call
-Pre Start private method call
-hello - private
-Post Start private method call
-transforming calling function
-Pre Finish private method call
-Goodbye - private - Transformed
-Post Finish private method call
-Pre Start private method call - Transformed
-Hello - private - Transformed
-Post Start private method call - Transformed
-Not doing anything here
-Pre Finish private method call - Transformed
-Goodbye - private - Transformed
-Post Finish private method call - Transformed
diff --git a/test/915-obsolete-2/info.txt b/test/915-obsolete-2/info.txt
deleted file mode 100644
index c8b892c..0000000
--- a/test/915-obsolete-2/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Tests basic obsolete method support
diff --git a/test/915-obsolete-2/run b/test/915-obsolete-2/run
deleted file mode 100755
index 4379349..0000000
--- a/test/915-obsolete-2/run
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-./default-run "$@" --experimental agents \
-                   --experimental runtime-plugins \
-                   --jvmti
diff --git a/test/915-obsolete-2/src/Main.java b/test/915-obsolete-2/src/Main.java
deleted file mode 100644
index bbeb726..0000000
--- a/test/915-obsolete-2/src/Main.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.util.Base64;
-
-public class Main {
-  // class Transform {
-  //   private void Start() {
-  //     System.out.println("Hello - private - Transformed");
-  //   }
-  //
-  //   private void Finish() {
-  //     System.out.println("Goodbye - private - Transformed");
-  //   }
-  //
-  //   public void sayHi(Runnable r) {
-  //     System.out.println("Pre Start private method call - Transformed");
-  //     Start();
-  //     System.out.println("Post Start private method call - Transformed");
-  //     r.run();
-  //     System.out.println("Pre Finish private method call - Transformed");
-  //     Finish();
-  //     System.out.println("Post Finish private method call - Transformed");
-  //   }
-  // }
-  private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
-    "yv66vgAAADQAMgoADgAZCQAaABsIABwKAB0AHggAHwgAIAoADQAhCAAiCwAjACQIACUKAA0AJggA" +
-    "JwcAKAcAKQEABjxpbml0PgEAAygpVgEABENvZGUBAA9MaW5lTnVtYmVyVGFibGUBAAVTdGFydAEA" +
-    "BkZpbmlzaAEABXNheUhpAQAXKExqYXZhL2xhbmcvUnVubmFibGU7KVYBAApTb3VyY2VGaWxlAQAO" +
-    "VHJhbnNmb3JtLmphdmEMAA8AEAcAKgwAKwAsAQAdSGVsbG8gLSBwcml2YXRlIC0gVHJhbnNmb3Jt" +
-    "ZWQHAC0MAC4ALwEAH0dvb2RieWUgLSBwcml2YXRlIC0gVHJhbnNmb3JtZWQBACtQcmUgU3RhcnQg" +
-    "cHJpdmF0ZSBtZXRob2QgY2FsbCAtIFRyYW5zZm9ybWVkDAATABABACxQb3N0IFN0YXJ0IHByaXZh" +
-    "dGUgbWV0aG9kIGNhbGwgLSBUcmFuc2Zvcm1lZAcAMAwAMQAQAQAsUHJlIEZpbmlzaCBwcml2YXRl" +
-    "IG1ldGhvZCBjYWxsIC0gVHJhbnNmb3JtZWQMABQAEAEALVBvc3QgRmluaXNoIHByaXZhdGUgbWV0" +
-    "aG9kIGNhbGwgLSBUcmFuc2Zvcm1lZAEACVRyYW5zZm9ybQEAEGphdmEvbGFuZy9PYmplY3QBABBq" +
-    "YXZhL2xhbmcvU3lzdGVtAQADb3V0AQAVTGphdmEvaW8vUHJpbnRTdHJlYW07AQATamF2YS9pby9Q" +
-    "cmludFN0cmVhbQEAB3ByaW50bG4BABUoTGphdmEvbGFuZy9TdHJpbmc7KVYBABJqYXZhL2xhbmcv" +
-    "UnVubmFibGUBAANydW4AIAANAA4AAAAAAAQAAAAPABAAAQARAAAAHQABAAEAAAAFKrcAAbEAAAAB" +
-    "ABIAAAAGAAEAAAABAAIAEwAQAAEAEQAAACUAAgABAAAACbIAAhIDtgAEsQAAAAEAEgAAAAoAAgAA" +
-    "AAMACAAEAAIAFAAQAAEAEQAAACUAAgABAAAACbIAAhIFtgAEsQAAAAEAEgAAAAoAAgAAAAcACAAI" +
-    "AAEAFQAWAAEAEQAAAGMAAgACAAAAL7IAAhIGtgAEKrcAB7IAAhIItgAEK7kACQEAsgACEgq2AAQq" +
-    "twALsgACEgy2AASxAAAAAQASAAAAIgAIAAAACwAIAAwADAANABQADgAaAA8AIgAQACYAEQAuABIA" +
-    "AQAXAAAAAgAY");
-  private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
-    "ZGV4CjAzNQCM0QYTJmX+NsZXkImojgSkJtXyuew3oaXcBAAAcAAAAHhWNBIAAAAAAAAAADwEAAAX" +
-    "AAAAcAAAAAcAAADMAAAAAwAAAOgAAAABAAAADAEAAAcAAAAUAQAAAQAAAEwBAABwAwAAbAEAAD4C" +
-    "AABGAgAATgIAAG8CAACOAgAAmwIAALICAADGAgAA3AIAAPACAAAEAwAAMwMAAGEDAACPAwAAvAMA" +
-    "AMMDAADTAwAA1gMAANoDAADuAwAA8wMAAPwDAAABBAAABAAAAAUAAAAGAAAABwAAAAgAAAAJAAAA" +
-    "EAAAABAAAAAGAAAAAAAAABEAAAAGAAAAMAIAABEAAAAGAAAAOAIAAAUAAQATAAAAAAAAAAAAAAAA" +
-    "AAAAAQAAAAAAAAAOAAAAAAABABYAAAABAAIAFAAAAAIAAAAAAAAAAwAAABUAAAAAAAAAAAAAAAIA" +
-    "AAAAAAAADwAAAAAAAAAmBAAAAAAAAAEAAQABAAAACAQAAAQAAABwEAUAAAAOAAMAAQACAAAADQQA" +
-    "AAkAAABiAAAAGwECAAAAbiAEABAADgAAAAMAAQACAAAAEwQAAAkAAABiAAAAGwEDAAAAbiAEABAA" +
-    "DgAAAAQAAgACAAAAGQQAACoAAABiAAAAGwENAAAAbiAEABAAcBACAAIAYgAAABsBCwAAAG4gBAAQ" +
-    "AHIQBgADAGIAAAAbAQwAAABuIAQAEABwEAEAAgBiAAAAGwEKAAAAbiAEABAADgABAAAAAwAAAAEA" +
-    "AAAEAAY8aW5pdD4ABkZpbmlzaAAfR29vZGJ5ZSAtIHByaXZhdGUgLSBUcmFuc2Zvcm1lZAAdSGVs" +
-    "bG8gLSBwcml2YXRlIC0gVHJhbnNmb3JtZWQAC0xUcmFuc2Zvcm07ABVMamF2YS9pby9QcmludFN0" +
-    "cmVhbTsAEkxqYXZhL2xhbmcvT2JqZWN0OwAUTGphdmEvbGFuZy9SdW5uYWJsZTsAEkxqYXZhL2xh" +
-    "bmcvU3RyaW5nOwASTGphdmEvbGFuZy9TeXN0ZW07AC1Qb3N0IEZpbmlzaCBwcml2YXRlIG1ldGhv" +
-    "ZCBjYWxsIC0gVHJhbnNmb3JtZWQALFBvc3QgU3RhcnQgcHJpdmF0ZSBtZXRob2QgY2FsbCAtIFRy" +
-    "YW5zZm9ybWVkACxQcmUgRmluaXNoIHByaXZhdGUgbWV0aG9kIGNhbGwgLSBUcmFuc2Zvcm1lZAAr" +
-    "UHJlIFN0YXJ0IHByaXZhdGUgbWV0aG9kIGNhbGwgLSBUcmFuc2Zvcm1lZAAFU3RhcnQADlRyYW5z" +
-    "Zm9ybS5qYXZhAAFWAAJWTAASZW1pdHRlcjogamFjay00LjEzAANvdXQAB3ByaW50bG4AA3J1bgAF" +
-    "c2F5SGkAAQAHDgAHAAcOhwADAAcOhwALAQAHDoc8hzyHPIcAAAADAQCAgATsAgEChAMBAqgDAwHM" +
-    "Aw0AAAAAAAAAAQAAAAAAAAABAAAAFwAAAHAAAAACAAAABwAAAMwAAAADAAAAAwAAAOgAAAAEAAAA" +
-    "AQAAAAwBAAAFAAAABwAAABQBAAAGAAAAAQAAAEwBAAABIAAABAAAAGwBAAABEAAAAgAAADACAAAC" +
-    "IAAAFwAAAD4CAAADIAAABAAAAAgEAAAAIAAAAQAAACYEAAAAEAAAAQAAADwEAAA=");
-
-  public static void main(String[] args) {
-    System.loadLibrary(args[1]);
-    doTest(new Transform());
-  }
-
-  public static void doTest(Transform t) {
-    t.sayHi(() -> { System.out.println("Not doing anything here"); });
-    t.sayHi(() -> {
-      System.out.println("transforming calling function");
-      doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
-    });
-    t.sayHi(() -> { System.out.println("Not doing anything here"); });
-  }
-
-  // Transforms the class
-  private static native void doCommonClassRedefinition(Class<?> target,
-                                                       byte[] classfile,
-                                                       byte[] dexfile);
-}
diff --git a/test/915-obsolete-2/src/Transform.java b/test/915-obsolete-2/src/Transform.java
deleted file mode 100644
index e914e29..0000000
--- a/test/915-obsolete-2/src/Transform.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-class Transform {
-  private void Start() {
-    System.out.println("hello - private");
-  }
-
-  private void Finish() {
-    System.out.println("goodbye - private");
-  }
-
-  public void sayHi(Runnable r) {
-    System.out.println("Pre Start private method call");
-    Start();
-    System.out.println("Post Start private method call");
-    r.run();
-    System.out.println("Pre Finish private method call");
-    Finish();
-    System.out.println("Post Finish private method call");
-  }
-}
diff --git a/test/916-obsolete-jit/build b/test/916-obsolete-jit/build
deleted file mode 100755
index 898e2e5..0000000
--- a/test/916-obsolete-jit/build
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-./default-build "$@" --experimental agents
diff --git a/test/916-obsolete-jit/expected.txt b/test/916-obsolete-jit/expected.txt
deleted file mode 100644
index 4caefc6..0000000
--- a/test/916-obsolete-jit/expected.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-Pre Start private method call
-hello - private
-Post Start private method call
-Not doing anything here
-Pre Finish private method call
-goodbye - private
-Post Finish private method call
-Pre Start private method call
-hello - private
-Post Start private method call
-transforming calling function
-Pre Finish private method call
-Goodbye - private - Transformed
-Post Finish private method call
-pre Start private method call - Transformed
-Hello - private - Transformed
-post Start private method call - Transformed
-Not doing anything here
-pre Finish private method call - Transformed
-Goodbye - private - Transformed
-post Finish private method call - Transformed
diff --git a/test/916-obsolete-jit/info.txt b/test/916-obsolete-jit/info.txt
deleted file mode 100644
index c8b892c..0000000
--- a/test/916-obsolete-jit/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Tests basic obsolete method support
diff --git a/test/916-obsolete-jit/run b/test/916-obsolete-jit/run
deleted file mode 100755
index 9056211..0000000
--- a/test/916-obsolete-jit/run
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# We are testing the redefinition of compiled code but with jvmti we only allow
-# jitted compiled code so always add the --jit argument.
-if [[ "$@" == *"--jit"* ]]; then
-  other_args=""
-else
-  other_args="--jit"
-fi
-./default-run "$@" --experimental agents \
-                   --experimental runtime-plugins \
-                   ${other_args} \
-                   --jvmti
diff --git a/test/916-obsolete-jit/src/Main.java b/test/916-obsolete-jit/src/Main.java
deleted file mode 100644
index 68e01a0..0000000
--- a/test/916-obsolete-jit/src/Main.java
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.util.function.Consumer;
-import java.lang.reflect.Method;
-import java.util.Base64;
-
-public class Main {
-
-  // import java.util.function.Consumer;
-  //
-  // class Transform {
-  //   private void Start(Consumer<String> reporter) {
-  //     reporter.accept("Hello - private - Transformed");
-  //   }
-  //
-  //   private void Finish(Consumer<String> reporter) {
-  //     reporter.accept("Goodbye - private - Transformed");
-  //   }
-  //
-  //   public void sayHi(Runnable r, Consumer<String> reporter) {
-  //     reporter.accept("pre Start private method call - Transformed");
-  //     Start(reporter);
-  //     reporter.accept("post Start private method call - Transformed");
-  //     r.run();
-  //     reporter.accept("pre Finish private method call - Transformed");
-  //     Finish(reporter);
-  //     reporter.accept("post Finish private method call - Transformed");
-  //   }
-  // }
-  private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
-    "yv66vgAAADQAMAoADQAcCAAdCwAeAB8IACAIACEKAAwAIggAIwsAJAAlCAAmCgAMACcIACgHACkH" +
-    "ACoBAAY8aW5pdD4BAAMoKVYBAARDb2RlAQAPTGluZU51bWJlclRhYmxlAQAFU3RhcnQBACAoTGph" +
-    "dmEvdXRpbC9mdW5jdGlvbi9Db25zdW1lcjspVgEACVNpZ25hdHVyZQEANChMamF2YS91dGlsL2Z1" +
-    "bmN0aW9uL0NvbnN1bWVyPExqYXZhL2xhbmcvU3RyaW5nOz47KVYBAAZGaW5pc2gBAAVzYXlIaQEA" +
-    "NChMamF2YS9sYW5nL1J1bm5hYmxlO0xqYXZhL3V0aWwvZnVuY3Rpb24vQ29uc3VtZXI7KVYBAEgo" +
-    "TGphdmEvbGFuZy9SdW5uYWJsZTtMamF2YS91dGlsL2Z1bmN0aW9uL0NvbnN1bWVyPExqYXZhL2xh" +
-    "bmcvU3RyaW5nOz47KVYBAApTb3VyY2VGaWxlAQAOVHJhbnNmb3JtLmphdmEMAA4ADwEAHUhlbGxv" +
-    "IC0gcHJpdmF0ZSAtIFRyYW5zZm9ybWVkBwArDAAsAC0BAB9Hb29kYnllIC0gcHJpdmF0ZSAtIFRy" +
-    "YW5zZm9ybWVkAQArcHJlIFN0YXJ0IHByaXZhdGUgbWV0aG9kIGNhbGwgLSBUcmFuc2Zvcm1lZAwA" +
-    "EgATAQAscG9zdCBTdGFydCBwcml2YXRlIG1ldGhvZCBjYWxsIC0gVHJhbnNmb3JtZWQHAC4MAC8A" +
-    "DwEALHByZSBGaW5pc2ggcHJpdmF0ZSBtZXRob2QgY2FsbCAtIFRyYW5zZm9ybWVkDAAWABMBAC1w" +
-    "b3N0IEZpbmlzaCBwcml2YXRlIG1ldGhvZCBjYWxsIC0gVHJhbnNmb3JtZWQBAAlUcmFuc2Zvcm0B" +
-    "ABBqYXZhL2xhbmcvT2JqZWN0AQAbamF2YS91dGlsL2Z1bmN0aW9uL0NvbnN1bWVyAQAGYWNjZXB0" +
-    "AQAVKExqYXZhL2xhbmcvT2JqZWN0OylWAQASamF2YS9sYW5nL1J1bm5hYmxlAQADcnVuACAADAAN" +
-    "AAAAAAAEAAAADgAPAAEAEAAAAB0AAQABAAAABSq3AAGxAAAAAQARAAAABgABAAAAEwACABIAEwAC" +
-    "ABAAAAAlAAIAAgAAAAkrEgK5AAMCALEAAAABABEAAAAKAAIAAAAVAAgAFgAUAAAAAgAVAAIAFgAT" +
-    "AAIAEAAAACUAAgACAAAACSsSBLkAAwIAsQAAAAEAEQAAAAoAAgAAABkACAAaABQAAAACABUAAQAX" +
-    "ABgAAgAQAAAAZQACAAMAAAAxLBIFuQADAgAqLLcABiwSB7kAAwIAK7kACAEALBIJuQADAgAqLLcA" +
-    "CiwSC7kAAwIAsQAAAAEAEQAAACIACAAAAB0ACAAeAA0AHwAVACAAGwAhACMAIgAoACMAMAAkABQA" +
-    "AAACABkAAQAaAAAAAgAb");
-  private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
-    "ZGV4CjAzNQBc8wr9PcHqnOR61m+0kimXTSddVMToJPuYBQAAcAAAAHhWNBIAAAAAAAAAAOAEAAAc" +
-    "AAAAcAAAAAYAAADgAAAABAAAAPgAAAAAAAAAAAAAAAcAAAAoAQAAAQAAAGABAAAYBAAAgAEAAHoC" +
-    "AAB9AgAAgAIAAIgCAACOAgAAlgIAALcCAADWAgAA4wIAAAIDAAAWAwAALAMAAEADAABeAwAAfQMA" +
-    "AIQDAACUAwAAlwMAAJsDAACgAwAAqAMAALwDAADrAwAAGQQAAEcEAAB0BAAAeQQAAIAEAAAHAAAA" +
-    "CAAAAAkAAAAKAAAADQAAABAAAAAQAAAABQAAAAAAAAARAAAABQAAAGQCAAASAAAABQAAAGwCAAAR" +
-    "AAAABQAAAHQCAAAAAAAAAgAAAAAAAwAEAAAAAAADAA4AAAAAAAIAGgAAAAIAAAACAAAAAwAAABkA" +
-    "AAAEAAEAEwAAAAAAAAAAAAAAAgAAAAAAAAAPAAAAPAIAAMoEAAAAAAAAAQAAAKgEAAABAAAAuAQA" +
-    "AAEAAQABAAAAhwQAAAQAAABwEAQAAAAOAAMAAgACAAAAjAQAAAcAAAAbAAUAAAByIAYAAgAOAAAA" +
-    "AwACAAIAAACTBAAABwAAABsABgAAAHIgBgACAA4AAAAEAAMAAgAAAJoEAAAiAAAAGwAYAAAAciAG" +
-    "AAMAcCACADEAGwAWAAAAciAGAAMAchAFAAIAGwAXAAAAciAGAAMAcCABADEAGwAVAAAAciAGAAMA" +
-    "DgAAAAAAAAAAAAMAAAAAAAAAAQAAAIABAAACAAAAgAEAAAMAAACIAQAAAQAAAAIAAAACAAAAAwAE" +
-    "AAEAAAAEAAEoAAE8AAY8aW5pdD4ABD47KVYABkZpbmlzaAAfR29vZGJ5ZSAtIHByaXZhdGUgLSBU" +
-    "cmFuc2Zvcm1lZAAdSGVsbG8gLSBwcml2YXRlIC0gVHJhbnNmb3JtZWQAC0xUcmFuc2Zvcm07AB1M" +
-    "ZGFsdmlrL2Fubm90YXRpb24vU2lnbmF0dXJlOwASTGphdmEvbGFuZy9PYmplY3Q7ABRMamF2YS9s" +
-    "YW5nL1J1bm5hYmxlOwASTGphdmEvbGFuZy9TdHJpbmc7ABxMamF2YS91dGlsL2Z1bmN0aW9uL0Nv" +
-    "bnN1bWVyAB1MamF2YS91dGlsL2Z1bmN0aW9uL0NvbnN1bWVyOwAFU3RhcnQADlRyYW5zZm9ybS5q" +
-    "YXZhAAFWAAJWTAADVkxMAAZhY2NlcHQAEmVtaXR0ZXI6IGphY2stNC4xOQAtcG9zdCBGaW5pc2gg" +
-    "cHJpdmF0ZSBtZXRob2QgY2FsbCAtIFRyYW5zZm9ybWVkACxwb3N0IFN0YXJ0IHByaXZhdGUgbWV0" +
-    "aG9kIGNhbGwgLSBUcmFuc2Zvcm1lZAAscHJlIEZpbmlzaCBwcml2YXRlIG1ldGhvZCBjYWxsIC0g" +
-    "VHJhbnNmb3JtZWQAK3ByZSBTdGFydCBwcml2YXRlIG1ldGhvZCBjYWxsIC0gVHJhbnNmb3JtZWQA" +
-    "A3J1bgAFc2F5SGkABXZhbHVlABMABw4AGQEABw5pABUBAAcOaQAdAgAABw5pPGk8aTxpAAIBARsc" +
-    "BRcAFwwXARcLFwMCAQEbHAYXABcKFwwXARcLFwMAAAMBAICABJADAQKoAwECyAMDAegDDwAAAAAA" +
-    "AAABAAAAAAAAAAEAAAAcAAAAcAAAAAIAAAAGAAAA4AAAAAMAAAAEAAAA+AAAAAUAAAAHAAAAKAEA" +
-    "AAYAAAABAAAAYAEAAAMQAAACAAAAgAEAAAEgAAAEAAAAkAEAAAYgAAABAAAAPAIAAAEQAAADAAAA" +
-    "ZAIAAAIgAAAcAAAAegIAAAMgAAAEAAAAhwQAAAQgAAACAAAAqAQAAAAgAAABAAAAygQAAAAQAAAB" +
-    "AAAA4AQAAA==");
-
-  // A class that we can use to keep track of the output of this test.
-  private static class TestWatcher implements Consumer<String> {
-    private StringBuilder sb;
-    public TestWatcher() {
-      sb = new StringBuilder();
-    }
-
-    @Override
-    public void accept(String s) {
-      sb.append(s);
-      sb.append('\n');
-    }
-
-    public String getOutput() {
-      return sb.toString();
-    }
-
-    public void clear() {
-      sb = new StringBuilder();
-    }
-  }
-
-  public static void main(String[] args) {
-    System.loadLibrary(args[1]);
-    doTest(new Transform(), new TestWatcher());
-  }
-
-  // TODO Workaround to (1) inability to ensure that current_method is not put into a register by
-  // the JIT and/or (2) inability to deoptimize frames near runtime functions.
-  // TODO Fix one/both of these issues.
-  public static void doCall(Runnable r) {
-      r.run();
-  }
-
-  private static boolean interpreting = true;
-  private static boolean retry = false;
-
-  public static void doTest(Transform t, TestWatcher w) {
-    // Get the methods that need to be optimized.
-    Method say_hi_method;
-    Method do_call_method;
-    try {
-      say_hi_method = Transform.class.getDeclaredMethod(
-          "sayHi", Runnable.class, Consumer.class);
-      do_call_method = Main.class.getDeclaredMethod("doCall", Runnable.class);
-    } catch (Exception e) {
-      System.out.println("Unable to find methods!");
-      e.printStackTrace();
-      return;
-    }
-    // Makes sure the stack is the way we want it for the test and does the redefinition. It will
-    // set the retry boolean to true if we need to go around again due to a bad stack.
-    Runnable do_redefinition = () -> {
-      if (Main.isInterpretedFunction(say_hi_method, true) ||
-          Main.isInterpretedFunction(do_call_method, false)) {
-        // Try again. We are not running the right jitted methods/cannot redefine them now.
-        retry = true;
-      } else {
-        // Actually do the redefinition. The stack looks good.
-        retry = false;
-        w.accept("transforming calling function");
-        doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
-      }
-    };
-    // This does nothing.
-    Runnable noop = () -> {};
-    // This just prints something out to show we are running the Runnable.
-    Runnable say_nothing = () -> { w.accept("Not doing anything here"); };
-    // This checks to see if we have jitted the methods we are testing.
-    Runnable check_interpreting = () -> {
-      // TODO remove the second check when we remove the doCall function. We need to check that
-      // both of these functions aren't being interpreted because if sayHi is the test doesn't do
-      // anything and if doCall is then there will be a runtime call right above the sayHi
-      // function preventing sayHi from being deoptimized.
-      interpreting = Main.isInterpretedFunction(say_hi_method, true) ||
-                      Main.isInterpretedFunction(do_call_method, false);
-    };
-    do {
-      w.clear();
-      // Wait for the methods to be jitted
-      long j = 0;
-      do {
-        for (int i = 0; i < 10000; i++) {
-          t.sayHi(noop, w);
-          j++;
-        }
-        t.sayHi(check_interpreting, w);
-        if (j >= 1000000) {
-          System.out.println("FAIL: Could not make sayHi be Jitted!");
-          return;
-        }
-        j++;
-      } while(interpreting);
-      // Clear output. Now we try for real.
-      w.clear();
-      // Try and redefine.
-      t.sayHi(say_nothing, w);
-      t.sayHi(do_redefinition, w);
-      t.sayHi(say_nothing, w);
-    } while (retry);
-    // Print output of last run.
-    System.out.print(w.getOutput());
-  }
-
-  private static native boolean isInterpretedFunction(Method m, boolean require_deoptimizable);
-
-  // Transforms the class
-  private static native void doCommonClassRedefinition(Class<?> target,
-                                                       byte[] classfile,
-                                                       byte[] dexfile);
-}
diff --git a/test/916-obsolete-jit/src/Transform.java b/test/916-obsolete-jit/src/Transform.java
deleted file mode 100644
index f4dcf09..0000000
--- a/test/916-obsolete-jit/src/Transform.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.util.function.Consumer;
-
-class Transform {
-  private void Start(Consumer<String> reporter) {
-    reporter.accept("hello - private");
-  }
-
-  private void Finish(Consumer<String> reporter) {
-    reporter.accept("goodbye - private");
-  }
-
-  public void sayHi(Runnable r, Consumer<String> reporter) {
-    reporter.accept("Pre Start private method call");
-    Start(reporter);
-    reporter.accept("Post Start private method call");
-    // TODO Revisit with b/33616143
-    // TODO Uncomment this once either b/33630159 or b/33616143 are resolved.
-    // r.run();
-    // TODO This doCall function is a very temporary fix until we get either deoptimization near
-    // runtime frames working, forcing current method to be always read from the stack or both
-    // working.
-    Main.doCall(r);
-    reporter.accept("Pre Finish private method call");
-    Finish(reporter);
-    reporter.accept("Post Finish private method call");
-  }
-}
diff --git a/test/917-fields-transformation/run b/test/917-fields-transformation/run
index 4379349..a434b63 100755
--- a/test/917-fields-transformation/run
+++ b/test/917-fields-transformation/run
@@ -14,6 +14,30 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+plugin=libopenjdkjvmtid.so
+agent=libtiagentd.so
+lib=tiagentd
+if [[ "$@" == *"-O"* ]]; then
+  agent=libtiagent.so
+  plugin=libopenjdkjvmti.so
+  lib=tiagent
+fi
+
+if [[ "$@" == *"--jvm"* ]]; then
+  arg="jvm"
+else
+  arg="art"
+  if [[ "$@" != *"--debuggable"* ]]; then
+    other_args=" -Xcompiler-option --debuggable "
+  else
+    other_args=""
+  fi
+fi
+
 ./default-run "$@" --experimental agents \
                    --experimental runtime-plugins \
-                   --jvmti
+                   --runtime-option -agentpath:${agent}=917-fields-transformation,${arg} \
+                   --android-runtime-option -Xplugin:${plugin} \
+                   --android-runtime-option -Xfully-deoptable \
+                   ${other_args} \
+                   --args ${lib}
diff --git a/test/Android.bp b/test/Android.bp
index 5a2c902..2625f56 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -243,9 +243,6 @@
     name: "libtiagent-defaults",
     defaults: ["libartagent-defaults"],
     srcs: [
-        // This is to get the IsInterpreted native method.
-        "common/stack_inspect.cc",
-        "common/runtime_state.cc",
         "ti-agent/common_load.cc",
         "ti-agent/common_helper.cc",
         "901-hello-ti-agent/basics.cc",
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 56f7ed7..21331ce 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -227,8 +227,10 @@
 endef  # name-to-var
 
 # Disable 153-reference-stress temporarily until a fix arrives. b/33389022.
+# Disable 080-oom-fragmentation due to flakes. b/33795328
 ART_TEST_RUN_TEST_SKIP += \
-  153-reference-stress
+  153-reference-stress \
+  080-oom-fragmentation
 
 ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
         $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
@@ -284,9 +286,6 @@
   911-get-stack-trace \
   912-classes \
   913-heaps \
-  914-hello-obsolescence \
-  915-obsolete-2 \
-  916-obsolete-jit \
   917-fields-transformation \
 
 ifneq (,$(filter target,$(TARGET_TYPES)))
@@ -540,15 +539,22 @@
 # Test 906 iterates the heap filtering with different options. No instances should be created
 # between those runs to be able to have precise checks.
 # Test 902 hits races with the JIT compiler. b/32821077
-# Test 626-const-class-linking can deadlock with JIT. b/33567581
 # Test 629 requires compilation.
+# Tests 914, 915, 917, & 918 are very sensitive to the exact state of the stack,
+# including the JIT-inserted runtime frames. This causes them to be somewhat
+# flaky as JIT tests. This should be fixed once b/33630159 or b/33616143 are
+# resolved, but until then just disable them. Test 916 already checks this
+# feature for JIT use cases in a way that is resilient to the JIT frames.
 TEST_ART_BROKEN_JIT_RUN_TESTS := \
   137-cfi \
-  626-const-class-linking \
   629-vdex-speed \
   902-hello-transformation \
   904-object-allocation \
   906-iterate-heap \
+  914-hello-obsolescence \
+  915-obsolete-2 \
+  917-fields-transformation \
+  918-obsolete-fields \
 
 ifneq (,$(filter jit,$(COMPILER_TYPES)))
   ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
@@ -680,15 +686,6 @@
   endif
 endif
 
-# Tests disabled for GSS.
-TEST_ART_BROKEN_GSS_RUN_TESTS := 080-oom-fragmentation
-ifeq ($(ART_DEFAULT_GC_TYPE),GSS)
-  ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES), \
-      $(PREBUILD_TYPES),$(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES), \
-      $(JNI_TYPES),$(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \
-      $(TEST_ART_BROKEN_GSS_RUN_TESTS),$(ALL_ADDRESS_SIZES))
-endif
-
 TEST_ART_BROKEN_OPTIMIZING_READ_BARRIER_RUN_TESTS :=
 TEST_ART_BROKEN_JIT_READ_BARRIER_RUN_TESTS :=
 
diff --git a/test/common/stack_inspect.cc b/test/common/stack_inspect.cc
index df7fa20..4df2d47 100644
--- a/test/common/stack_inspect.cc
+++ b/test/common/stack_inspect.cc
@@ -18,7 +18,6 @@
 
 #include "base/logging.h"
 #include "dex_file-inl.h"
-#include "jni_internal.h"
 #include "mirror/class-inl.h"
 #include "nth_caller_visitor.h"
 #include "oat_file.h"
@@ -53,89 +52,6 @@
   return IsInterpreted(env, klass, 1);
 }
 
-// public static native boolean isInterpreted(int depth);
-
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInterpretedAt(JNIEnv* env,
-                                                                jclass klass,
-                                                                jint depth) {
-  return IsInterpreted(env, klass, depth);
-}
-
-
-// public static native boolean isInterpretedFunction(String smali);
-
-// TODO Remove 'allow_runtime_frames' option once we have deoptimization through runtime frames.
-struct MethodIsInterpretedVisitor : public StackVisitor {
- public:
-  MethodIsInterpretedVisitor(Thread* thread, ArtMethod* goal, bool require_deoptable)
-      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
-        goal_(goal),
-        method_is_interpreted_(true),
-        method_found_(false),
-        prev_was_runtime_(true),
-        require_deoptable_(require_deoptable) {}
-
-  virtual bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (goal_ == GetMethod()) {
-      method_is_interpreted_ = (require_deoptable_ && prev_was_runtime_) || IsShadowFrame();
-      method_found_ = true;
-      return false;
-    }
-    prev_was_runtime_ = GetMethod()->IsRuntimeMethod();
-    return true;
-  }
-
-  bool IsInterpreted() {
-    return method_is_interpreted_;
-  }
-
-  bool IsFound() {
-    return method_found_;
-  }
-
- private:
-  const ArtMethod* goal_;
-  bool method_is_interpreted_;
-  bool method_found_;
-  bool prev_was_runtime_;
-  bool require_deoptable_;
-};
-
-// TODO Remove 'require_deoptimizable' option once we have deoptimization through runtime frames.
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInterpretedFunction(
-    JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject method, jboolean require_deoptimizable) {
-  // Return false if this seems to not be an ART runtime.
-  if (Runtime::Current() == nullptr) {
-    return JNI_FALSE;
-  }
-  if (method == nullptr) {
-    env->ThrowNew(env->FindClass("java/lang/NullPointerException"), "method is null!");
-    return JNI_FALSE;
-  }
-  jmethodID id = env->FromReflectedMethod(method);
-  if (id == nullptr) {
-    env->ThrowNew(env->FindClass("java/lang/Error"), "Unable to interpret method argument!");
-    return JNI_FALSE;
-  }
-  bool result;
-  bool found;
-  {
-    ScopedObjectAccess soa(env);
-    ArtMethod* goal = jni::DecodeArtMethod(id);
-    MethodIsInterpretedVisitor v(soa.Self(), goal, require_deoptimizable);
-    v.WalkStack();
-    bool enters_interpreter = Runtime::Current()->GetClassLinker()->IsQuickToInterpreterBridge(
-        goal->GetEntryPointFromQuickCompiledCode());
-    result = (v.IsInterpreted() || enters_interpreter);
-    found = v.IsFound();
-  }
-  if (!found) {
-    env->ThrowNew(env->FindClass("java/lang/Error"), "Unable to find given method in stack!");
-    return JNI_FALSE;
-  }
-  return result;
-}
-
 // public static native void assertIsInterpreted();
 
 extern "C" JNIEXPORT void JNICALL Java_Main_assertIsInterpreted(JNIEnv* env, jclass klass) {
diff --git a/test/etc/default-build b/test/etc/default-build
index faa0813..a0408aa 100755
--- a/test/etc/default-build
+++ b/test/etc/default-build
@@ -69,7 +69,6 @@
 
 # Setup experimental flag mappings in a bash associative array.
 declare -A JACK_EXPERIMENTAL_ARGS
-JACK_EXPERIMENTAL_ARGS["agents"]="-D jack.java.source.version=1.8 -D jack.android.min-api-level=24"
 JACK_EXPERIMENTAL_ARGS["default-methods"]="-D jack.java.source.version=1.8 -D jack.android.min-api-level=24"
 JACK_EXPERIMENTAL_ARGS["lambdas"]="-D jack.java.source.version=1.8 -D jack.android.min-api-level=24"
 JACK_EXPERIMENTAL_ARGS["method-handles"]="-D jack.java.source.version=1.7 -D jack.android.min-api-level=o-b1"
@@ -77,14 +76,12 @@
 declare -A SMALI_EXPERIMENTAL_ARGS
 SMALI_EXPERIMENTAL_ARGS["default-methods"]="--api-level 24"
 SMALI_EXPERIMENTAL_ARGS["method-handles"]="--api-level 26"
-SMALI_EXPERIMENTAL_ARGS["agents"]="--api-level 26"
 
 declare -A JAVAC_EXPERIMENTAL_ARGS
 JAVAC_EXPERIMENTAL_ARGS["default-methods"]="-source 1.8 -target 1.8"
 JAVAC_EXPERIMENTAL_ARGS["lambdas"]="-source 1.8 -target 1.8"
 JAVAC_EXPERIMENTAL_ARGS["method-handles"]="-source 1.8 -target 1.8"
 JAVAC_EXPERIMENTAL_ARGS[${DEFAULT_EXPERIMENT}]="-source 1.7 -target 1.7"
-JAVAC_EXPERIMENTAL_ARGS["agents"]="-source 1.8 -target 1.8"
 
 while true; do
   if [ "x$1" = "x--dx-option" ]; then
@@ -129,6 +126,16 @@
 # Be sure to get any default arguments if not doing any experiments.
 EXPERIMENTAL="${EXPERIMENTAL} ${DEFAULT_EXPERIMENT}"
 
+if [ "${JACK_SERVER}" = "false" ]; then
+  # Run in single-threaded mode for the continuous buildbot.
+  JACK_ARGS="${JACK_ARGS} -D sched.runner=single-threaded"
+else
+  # Run with 4 threads to reduce memory footprint and thread contention.
+  JACK_ARGS="${JACK_ARGS} -D sched.runner=multi-threaded"
+  JACK_ARGS="${JACK_ARGS} -D sched.runner.thread.kind=fixed"
+  JACK_ARGS="${JACK_ARGS} -D sched.runner.thread.fixed.count=4"
+fi
+
 # Add args from the experimental mappings.
 for experiment in ${EXPERIMENTAL}; do
   JACK_ARGS="${JACK_ARGS} ${JACK_EXPERIMENTAL_ARGS[${experiment}]}"
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index dd9ca07..566f7ba 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -30,7 +30,6 @@
 INTERPRETER="n"
 JIT="n"
 INVOKE_WITH=""
-IS_JVMTI_TEST="n"
 ISA=x86
 LIBRARY_DIRECTORY="lib"
 TEST_DIRECTORY="nativetest"
@@ -66,9 +65,6 @@
     if [ "x$1" = "x--quiet" ]; then
         QUIET="y"
         shift
-    elif [ "x$1" = "x--jvmti" ]; then
-        IS_JVMTI_TEST="y"
-        shift
     elif [ "x$1" = "x-O" ]; then
         # Ignore this option.
         shift
@@ -386,28 +382,6 @@
     fi
 fi
 
-if [ "$IS_JVMTI_TEST" = "y" ]; then
-  plugin=libopenjdkjvmtid.so
-  agent=libtiagentd.so
-  lib=tiagentd
-  if  [[ "$@" == *"-O"* ]]; then
-    agent=libtiagent.so
-    plugin=libopenjdkjvmti.so
-    lib=tiagent
-  fi
-
-  ARGS="${ARGS} ${lib}"
-  if [[ "$USE_JVM" = "y" ]]; then
-    FLAGS="${FLAGS} -agentpath:${agent}=${TEST_NAME},jvm"
-  else
-    FLAGS="${FLAGS} -agentpath:${agent}=${TEST_NAME},art"
-    FLAGS="${FLAGS} -Xplugin:${plugin}"
-    FLAGS="${FLAGS} -Xfully-deoptable"
-    # Always make the compilation be debuggable.
-    COMPILE_FLAGS="${COMPILE_FLAGS} --debuggable"
-  fi
-fi
-
 JNI_OPTS="-Xjnigreflimit:512 -Xcheck:jni"
 
 if [ "$RELOCATE" = "y" ]; then
diff --git a/test/ti-agent/common_helper.cc b/test/ti-agent/common_helper.cc
index ebf1e46..3e2b168 100644
--- a/test/ti-agent/common_helper.cc
+++ b/test/ti-agent/common_helper.cc
@@ -18,11 +18,8 @@
 
 #include <stdio.h>
 
-#include "art_method.h"
 #include "jni.h"
 #include "openjdkjvmti/jvmti.h"
-#include "scoped_thread_state_change-inl.h"
-#include "stack.h"
 #include "ti-agent/common_load.h"
 #include "utils.h"
 
diff --git a/test/ti-agent/common_load.cc b/test/ti-agent/common_load.cc
index 79c17d7..3886148 100644
--- a/test/ti-agent/common_load.cc
+++ b/test/ti-agent/common_load.cc
@@ -66,9 +66,6 @@
   { "911-get-stack-trace", Test911GetStackTrace::OnLoad, nullptr },
   { "912-classes", Test912Classes::OnLoad, nullptr },
   { "913-heaps", Test913Heaps::OnLoad, nullptr },
-  { "914-hello-obsolescence", common_redefine::OnLoad, nullptr },
-  { "915-obsolete-2", common_redefine::OnLoad, nullptr },
-  { "916-obsolete-jit", common_redefine::OnLoad, nullptr },
   { "917-fields-transformation", common_redefine::OnLoad, nullptr },
 };
 
diff --git a/tools/jfuzz/run_jfuzz_test.py b/tools/jfuzz/run_jfuzz_test.py
index 42745d2..b5f856f 100755
--- a/tools/jfuzz/run_jfuzz_test.py
+++ b/tools/jfuzz/run_jfuzz_test.py
@@ -43,10 +43,11 @@
 BISECTABLE_RET_CODES = (RetCode.SUCCESS, RetCode.ERROR, RetCode.TIMEOUT)
 
 
-def GetExecutionModeRunner(device, mode):
+def GetExecutionModeRunner(use_dx, device, mode):
   """Returns a runner for the given execution mode.
 
   Args:
+    use_dx: boolean, if True use dx rather than jack
     device: string, target device serial number (or None)
     mode: string, execution mode
   Returns:
@@ -57,13 +58,13 @@
   if mode == 'ri':
     return TestRunnerRIOnHost()
   if mode == 'hint':
-    return TestRunnerArtIntOnHost()
+    return TestRunnerArtIntOnHost(use_dx)
   if mode == 'hopt':
-    return TestRunnerArtOptOnHost()
+    return TestRunnerArtOptOnHost(use_dx)
   if mode == 'tint':
-    return TestRunnerArtIntOnTarget(device)
+    return TestRunnerArtIntOnTarget(use_dx, device)
   if mode == 'topt':
-    return TestRunnerArtOptOnTarget(device)
+    return TestRunnerArtOptOnTarget(use_dx, device)
   raise FatalError('Unknown execution mode')
 
 
@@ -113,6 +114,33 @@
     """
 
 
+class TestRunnerWithHostCompilation(TestRunner):
+  """Abstract test runner that supports compilation on host."""
+
+  def  __init__(self, use_dx):
+    """Constructor for the runner with host compilation.
+
+    Args:
+      use_dx: boolean, if True use dx rather than jack
+    """
+    self._jack_args = ['-cp', GetJackClassPath(), '--output-dex', '.',
+                       'Test.java']
+    self._use_dx = use_dx
+
+  def CompileOnHost(self):
+    if self._use_dx:
+      if RunCommand(['javac', 'Test.java'],
+                    out=None, err=None, timeout=30) == RetCode.SUCCESS:
+        retc = RunCommand(['dx', '--dex', '--output=classes.dex'] + glob('*.class'),
+                          out=None, err='dxerr.txt', timeout=30)
+      else:
+        retc = RetCode.NOTCOMPILED
+    else:
+      retc = RunCommand(['jack'] + self._jack_args,
+                        out=None, err='jackerr.txt', timeout=30)
+    return retc
+
+
 class TestRunnerRIOnHost(TestRunner):
   """Concrete test runner of the reference implementation on host."""
 
@@ -136,25 +164,24 @@
     return None
 
 
-class TestRunnerArtOnHost(TestRunner):
+class TestRunnerArtOnHost(TestRunnerWithHostCompilation):
   """Abstract test runner of Art on host."""
 
-  def  __init__(self, extra_args=None):
+  def  __init__(self, use_dx, extra_args=None):
     """Constructor for the Art on host tester.
 
     Args:
+      use_dx: boolean, if True use dx rather than jack
       extra_args: list of strings, extra arguments for dalvikvm
     """
+    super().__init__(use_dx)
     self._art_cmd = ['/bin/bash', 'art', '-cp', 'classes.dex']
     if extra_args is not None:
       self._art_cmd += extra_args
     self._art_cmd.append('Test')
-    self._jack_args = ['-cp', GetJackClassPath(), '--output-dex', '.',
-                       'Test.java']
 
   def CompileAndRunTest(self):
-    if RunCommand(['jack'] + self._jack_args, out=None, err='jackerr.txt',
-                  timeout=30) == RetCode.SUCCESS:
+    if self.CompileOnHost() == RetCode.SUCCESS:
       retc = RunCommand(self._art_cmd, self.output_file, 'arterr.txt')
     else:
       retc = RetCode.NOTCOMPILED
@@ -164,9 +191,13 @@
 class TestRunnerArtIntOnHost(TestRunnerArtOnHost):
   """Concrete test runner of interpreter mode Art on host."""
 
-  def  __init__(self):
-    """Constructor."""
-    super().__init__(['-Xint'])
+  def  __init__(self, use_dx):
+    """Constructor for the Art on host tester (interpreter).
+
+    Args:
+      use_dx: boolean, if True use dx rather than jack
+   """
+    super().__init__(use_dx, ['-Xint'])
 
   @property
   def description(self):
@@ -183,9 +214,13 @@
 class TestRunnerArtOptOnHost(TestRunnerArtOnHost):
   """Concrete test runner of optimizing compiler mode Art on host."""
 
-  def  __init__(self):
-    """Constructor."""
-    super().__init__(None)
+  def  __init__(self, use_dx):
+    """Constructor for the Art on host tester (optimizing).
+
+    Args:
+      use_dx: boolean, if True use dx rather than jack
+   """
+    super().__init__(use_dx, None)
 
   @property
   def description(self):
@@ -201,28 +236,27 @@
     return ['--raw-cmd={0}'.format(cmd_str), '--timeout', str(30)]
 
 
-class TestRunnerArtOnTarget(TestRunner):
+class TestRunnerArtOnTarget(TestRunnerWithHostCompilation):
   """Abstract test runner of Art on target."""
 
-  def  __init__(self, device, extra_args=None):
+  def  __init__(self, use_dx, device, extra_args=None):
     """Constructor for the Art on target tester.
 
     Args:
+      use_dx: boolean, if True use dx rather than jack
       device: string, target device serial number (or None)
       extra_args: list of strings, extra arguments for dalvikvm
     """
+    super().__init__(use_dx)
     self._test_env = DeviceTestEnv('jfuzz_', specific_device=device)
     self._dalvik_cmd = ['dalvikvm']
     if extra_args is not None:
       self._dalvik_cmd += extra_args
     self._device = device
-    self._jack_args = ['-cp', GetJackClassPath(), '--output-dex', '.',
-                       'Test.java']
     self._device_classpath = None
 
   def CompileAndRunTest(self):
-    if RunCommand(['jack'] + self._jack_args, out=None, err='jackerr.txt',
-                   timeout=30) == RetCode.SUCCESS:
+    if self.CompileOnHost() == RetCode.SUCCESS:
       self._device_classpath = self._test_env.PushClasspath('classes.dex')
       cmd = self._dalvik_cmd + ['-cp', self._device_classpath, 'Test']
       (output, retc) = self._test_env.RunCommand(
@@ -247,13 +281,14 @@
 class TestRunnerArtIntOnTarget(TestRunnerArtOnTarget):
   """Concrete test runner of interpreter mode Art on target."""
 
-  def  __init__(self, device):
-    """Constructor.
+  def  __init__(self, use_dx, device):
+    """Constructor for the Art on target tester (interpreter).
 
     Args:
+      use_dx: boolean, if True use dx rather than jack
       device: string, target device serial number (or None)
     """
-    super().__init__(device, ['-Xint'])
+    super().__init__(use_dx, device, ['-Xint'])
 
   @property
   def description(self):
@@ -270,13 +305,14 @@
 class TestRunnerArtOptOnTarget(TestRunnerArtOnTarget):
   """Concrete test runner of optimizing compiler mode Art on target."""
 
-  def  __init__(self, device):
-    """Constructor.
+  def  __init__(self, use_dx, device):
+    """Constructor for the Art on target tester (optimizing).
 
     Args:
+      use_dx: boolean, if True use dx rather than jack
       device: string, target device serial number (or None)
     """
-    super().__init__(device, None)
+    super().__init__(use_dx, device, None)
 
   @property
   def description(self):
@@ -306,7 +342,7 @@
   """Tester that runs JFuzz many times and report divergences."""
 
   def  __init__(self, num_tests, device, mode1, mode2, jfuzz_args,
-                report_script, true_divergence_only):
+                report_script, true_divergence_only, use_dx):
     """Constructor for the tester.
 
     Args:
@@ -317,14 +353,16 @@
       jfuzz_args: list of strings, additional arguments for jfuzz
       report_script: string, path to script called for each divergence
       true_divergence_only: boolean, if True don't bisect timeout divergences
+      use_dx: boolean, if True use dx rather than jack
     """
     self._num_tests = num_tests
     self._device = device
-    self._runner1 = GetExecutionModeRunner(device, mode1)
-    self._runner2 = GetExecutionModeRunner(device, mode2)
+    self._runner1 = GetExecutionModeRunner(use_dx, device, mode1)
+    self._runner2 = GetExecutionModeRunner(use_dx, device, mode2)
     self._jfuzz_args = jfuzz_args
     self._report_script = report_script
     self._true_divergence_only = true_divergence_only
+    self._use_dx = use_dx
     self._save_dir = None
     self._results_dir = None
     self._jfuzz_dir = None
@@ -367,6 +405,7 @@
     print('Directory :', self._results_dir)
     print('Exec-mode1:', self._runner1.description)
     print('Exec-mode2:', self._runner2.description)
+    print('Compiler  :', 'dx' if self._use_dx else 'jack')
     print()
     self.ShowStats()
     for self._test in range(1, self._num_tests + 1):
@@ -551,18 +590,21 @@
   parser.add_argument('--mode2', default='hopt',
                       help='execution mode 2 (default: hopt)')
   parser.add_argument('--report_script', help='script called for each'
-                                              'divergence')
+                                              ' divergence')
   parser.add_argument('--jfuzz_arg', default=[], dest='jfuzz_args',
                       action='append', help='argument for jfuzz')
   parser.add_argument('--true_divergence', default=False, action='store_true',
                       help='don\'t bisect timeout divergences')
+  parser.add_argument('--use_dx', default=False, action='store_true',
+                      help='use old-style dx (rather than jack)')
   args = parser.parse_args()
   if args.mode1 == args.mode2:
     raise FatalError('Identical execution modes given')
   # Run the JFuzz tester.
-  with JFuzzTester(args.num_tests, args.device, args.mode1, args.mode2,
+  with JFuzzTester(args.num_tests,
+                   args.device, args.mode1, args.mode2,
                    args.jfuzz_args, args.report_script,
-                   args.true_divergence) as fuzzer:
+                   args.true_divergence, args.use_dx) as fuzzer:
     fuzzer.Run()
 
 if __name__ == '__main__':
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 53fe8fe..dcef8c0 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -169,7 +169,8 @@
   description: "Lack of IPv6 on some buildbot slaves",
   result: EXEC_FAILED,
   names: ["libcore.io.OsTest#test_byteBufferPositions_sendto_recvfrom_af_inet6",
-          "libcore.io.OsTest#test_sendtoSocketAddress_af_inet6"],
+          "libcore.io.OsTest#test_sendtoSocketAddress_af_inet6",
+          "libcore.io.OsTest#test_recvfrom_EmptyPacket"],
   bug: 25178637
 },
 {